/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.9"
#define DRV_MODULE_RELDATE	"December 8, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32
bnx2_tx_avail(struct bnx2 *bp)

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
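	 * A completely full ring therefore shows diff == TX_DESC_CNT,
	 * which is clamped to MAX_TX_DESC_CNT in-use descriptors below.
	 */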
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);

bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);

bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)

	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}
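	/* Compose the MDIO read command: PHY address in bits 25:21,
	 * register number in bits 20:16, the read opcode, and the
	 * START_BUSY bit that the poll loop below waits to see clear.
	 */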
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}
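	/* Same command layout as the read path, with the 16-bit data
	 * value carried in the low bits and the write opcode instead.
	 */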
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

bnx2_disable_int(struct bnx2 *bp)

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

bnx2_enable_int(struct bnx2 *bp)

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
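	/* Force a coalescing pass so that any events that arrived while
	 * interrupts were masked are signalled right away.
	 */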
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);

bnx2_disable_int_sync(struct bnx2 *bp)
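	/* Bump intr_sem so the ISR turns into a no-op, mask the hardware
	 * interrupt, then wait for any handler already running to finish.
	 */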
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);

bnx2_netif_stop(struct bnx2 *bp)

	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}

bnx2_netif_start(struct bnx2 *bp)

	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
		}
	}
bnx2_free_mem(struct bnx2 *bp)

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		if (bp->rx_pg_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_pg_desc_ring[i],
					    bp->rx_pg_desc_mapping[i]);
		bp->rx_pg_desc_ring[i] = NULL;
	}
	vfree(bp->rx_pg_ring);
	bp->rx_pg_ring = NULL;
bnx2_alloc_mem(struct bnx2 *bp)

	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
	}

	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
		}
	}
bnx2_report_fw_link(struct bnx2 *bp)

	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
bnx2_xceiver_str(struct bnx2 *bp)

	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
bnx2_report_link(struct bnx2 *bp)

	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl & FLOW_CTRL_RX) {
			printk(", receive ");
			if (bp->flow_ctrl & FLOW_CTRL_TX)
				printk("& transmit ");
		}
		else {
			printk(", transmit ");
		}
		printk("flow control ON");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
bnx2_resolve_flow_ctrl(struct bnx2 *bp)

	u32 local_adv, remote_adv;

	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
	}

	if (bp->duplex != DUPLEX_FULL) {

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
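	/* Resolution below: both sides advertising symmetric pause gives
	 * TX+RX pause; local CAP+ASYM against a remote advertising only
	 * ASYM yields RX-only pause; local ASYM-only against a remote
	 * advertising CAP+ASYM yields TX-only pause.
	 */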
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
bnx2_5709s_linkup(struct bnx2 *bp)

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}

	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

bnx2_5708s_linkup(struct bnx2 *bp)

	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}

	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
bnx2_5706s_linkup(struct bnx2 *bp)

	u32 bmcr, local_adv, remote_adv, common;

	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

bnx2_copper_linkup(struct bnx2 *bp)

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
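		/* The link-partner 1000BASE-T ability bits in MII_STAT1000
		 * sit two bit positions above the corresponding local
		 * MII_CTRL1000 advertisement bits, hence the shift below.
		 */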
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}
bnx2_set_mac_link(struct bnx2 *bp)

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	switch (bp->line_speed) {
	case SPEED_10:
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			val |= BNX2_EMAC_MODE_PORT_MII_10M;
			break;
		}
		/* fall through */
	case SPEED_100:
		val |= BNX2_EMAC_MODE_PORT_MII;
		break;
	case SPEED_2500:
		val |= BNX2_EMAC_MODE_25G_MODE;
		/* fall through */
	case SPEED_1000:
		val |= BNX2_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
bnx2_enable_bmsr1(struct bnx2 *bp)

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);

bnx2_disable_bmsr1(struct bnx2 *bp)

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_test_and_enable_2g5(struct bnx2 *bp)

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

bnx2_test_and_disable_2g5(struct bnx2 *bp)

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
bnx2_enable_forced_2g5(struct bnx2 *bp)

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

bnx2_disable_forced_2g5(struct bnx2 *bp)

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
bnx2_set_link(struct bnx2 *bp)

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);
bnx2_reset_phy(struct bnx2 *bp)

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
bnx2_phy_get_pause_adv(struct bnx2 *bp)

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
static int bnx2_fw_sync(struct bnx2 *, u32, int);

bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)

	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)

	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);

			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
bnx2_set_default_remote_link(struct bnx2 *bp)

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}

bnx2_set_default_link(struct bnx2 *bp)

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;

bnx2_send_heart_beat(struct bnx2 *bp)

	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
bnx2_remote_phy_event(struct bnx2 *bp)

	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
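			/* fall through: the half-duplex cases set the duplex
			 * and share the speed assignment of the full-duplex
			 * case directly below.
			 */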
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
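			/* fall through */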
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
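			/* fall through */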
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
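			/* fall through */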
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
bnx2_set_remote_link(struct bnx2 *bp)

	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		bnx2_send_heart_beat(bp);
		break;
	}
bnx2_setup_copper_phy(struct bnx2 *bp)

	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	u32 new_bmcr = 0;

	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);

			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
bnx2_setup_phy(struct bnx2 *bp, u8 port)

	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}

bnx2_init_5709s_phy(struct bnx2 *bp)

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
bnx2_init_5708s_phy(struct bnx2 *bp)

	u32 val;

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
bnx2_init_5706s_phy(struct bnx2 *bp)

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}
bnx2_init_copper_phy(struct bnx2 *bp)

	u32 val;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);

		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
bnx2_init_phy(struct bnx2 *bp)

	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
bnx2_set_mac_loopback(struct bnx2 *bp)

	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;

static int bnx2_test_link(struct bnx2 *);

bnx2_set_phy_loopback(struct bnx2 *bp)

	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)

	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
bnx2_init_5709_context(struct bnx2 *bp)

	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
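	/* The page size is programmed as log2(page bytes) - 8 in the field
	 * at bit 16; MEM_INIT self-clears once context memory has been
	 * initialized, which the loop below polls for.
	 */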
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
bnx2_init_context(struct bnx2 *bp)

		u32 vcid_addr, pcid_addr, offset;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
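			/* As the expression above encodes it, the A0 silicon
			 * remapping drops bit 3 of the VCID and rebases the
			 * block at 0x60.
			 */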
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
bnx2_alloc_bad_rbuf(struct bnx2 *bp)

	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
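		/* The free command word packs the buffer number twice (a
		 * shifted copy at bit 9 plus the original) with bit 0 set
		 * as the request bit.
		 */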
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}

	kfree(good_mbuf);
	return 0;

bnx2_set_mac_addr(struct bnx2 *bp)

	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)

	dma_addr_t mapping;
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;

bnx2_free_rx_page(struct bnx2 *bp, u16 index)

	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
		       PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)

	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)

	struct status_block *sblk = bp->status_blk;
	u32 new_link_state, old_link_state;
	int is_set = 1;
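	/* The attention bit carries the current state and the ack bit the
	 * last state the driver acknowledged; a mismatch means this event
	 * has toggled since it was last handled.
	 */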
	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;

bnx2_phy_int(struct bnx2 *bp)

	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);
bnx2_tx_int(struct bnx2 *bp)

	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
		  u16 cons, u16 prod)

	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
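	/* Only the frame-header region was synced to the CPU for
	 * inspection; hand it back to the device before the buffer is
	 * recycled onto the producer ring.
	 */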
2428 bp->rx_prod_bseq += bp->rx_buf_use_size;
2430 prod_rx_buf->skb = skb;
2435 pci_unmap_addr_set(prod_rx_buf, mapping,
2436 pci_unmap_addr(cons_rx_buf, mapping));
2438 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2439 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2440 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2441 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
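/* Reusing a buffer is pure bookkeeping: the skb and its DMA mapping
 * move from the consumer slot to the producer slot and the BD address
 * words are copied over, so the error and copy paths never have to
 * allocate or remap anything.
 */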
2445 bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
2446 dma_addr_t dma_addr, u32 ring_idx)
2449 u16 prod = ring_idx & 0xffff;
2451 err = bnx2_alloc_rx_skb(bp, prod);
2452 if (unlikely(err)) {
2453 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
2457 skb_reserve(skb, bp->rx_offset);
2458 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2459 PCI_DMA_FROMDEVICE);
2466 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2468 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2470 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2476 bnx2_rx_int(struct bnx2 *bp, int budget)
2478 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2479 struct l2_fhdr *rx_hdr;
2482 hw_cons = bnx2_get_hw_rx_cons(bp);
2483 sw_cons = bp->rx_cons;
2484 sw_prod = bp->rx_prod;
2486 /* Memory barrier necessary as speculative reads of the rx
2487 * buffer can be ahead of the index in the status block. */
2490 while (sw_cons != hw_cons) {
2493 struct sw_bd *rx_buf;
2494 struct sk_buff *skb;
2495 dma_addr_t dma_addr;
2497 sw_ring_cons = RX_RING_IDX(sw_cons);
2498 sw_ring_prod = RX_RING_IDX(sw_prod);
2500 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2505 dma_addr = pci_unmap_addr(rx_buf, mapping);
2507 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2508 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2510 rx_hdr = (struct l2_fhdr *) skb->data;
2511 len = rx_hdr->l2_fhdr_pkt_len - 4;
2513 if ((status = rx_hdr->l2_fhdr_status) &
2514 (L2_FHDR_ERRORS_BAD_CRC |
2515 L2_FHDR_ERRORS_PHY_DECODE |
2516 L2_FHDR_ERRORS_ALIGNMENT |
2517 L2_FHDR_ERRORS_TOO_SHORT |
2518 L2_FHDR_ERRORS_GIANT_FRAME)) {
2520 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2524 if (len <= bp->rx_copy_thresh) {
2525 struct sk_buff *new_skb;
2527 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2528 if (new_skb == NULL) {
2529 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2535 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2536 new_skb->data, len + 2);
2537 skb_reserve(new_skb, 2);
2538 skb_put(new_skb, len);
2540 bnx2_reuse_rx_skb(bp, skb,
2541 sw_ring_cons, sw_ring_prod);
2544 } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
2545 (sw_ring_cons << 16) | sw_ring_prod)))
2548 skb->protocol = eth_type_trans(skb, bp->dev);
2550 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2551 (ntohs(skb->protocol) != ETH_P_8021Q)) {
2558 skb->ip_summed = CHECKSUM_NONE;
2560 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2561 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2563 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2564 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2565 skb->ip_summed = CHECKSUM_UNNECESSARY;
2569 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2570 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2571 rx_hdr->l2_fhdr_vlan_tag);
2575 netif_receive_skb(skb);
2577 bp->dev->last_rx = jiffies;
2581 sw_cons = NEXT_RX_BD(sw_cons);
2582 sw_prod = NEXT_RX_BD(sw_prod);
2584 if (rx_pkt == budget)
2587 /* Refresh hw_cons to see if there is new work */
2588 if (sw_cons == hw_cons) {
2589 hw_cons = bnx2_get_hw_rx_cons(bp);
2593 bp->rx_cons = sw_cons;
2594 bp->rx_prod = sw_prod;
2596 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2598 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
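/* Two mailbox writes publish the refilled ring to the chip: BDIDX
 * hands over the new producer index and BSEQ advances the cumulative
 * byte count of posted buffers.
 */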
2606 /* MSI ISR - The only difference between this and the INTx ISR
2607 * is that the MSI interrupt is always serviced. */
2610 bnx2_msi(int irq, void *dev_instance)
2612 struct net_device *dev = dev_instance;
2613 struct bnx2 *bp = netdev_priv(dev);
2615 prefetch(bp->status_blk);
2616 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2617 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2618 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2620 /* Return here if interrupt is disabled. */
2621 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2624 netif_rx_schedule(dev, &bp->napi);
2630 bnx2_msi_1shot(int irq, void *dev_instance)
2632 struct net_device *dev = dev_instance;
2633 struct bnx2 *bp = netdev_priv(dev);
2635 prefetch(bp->status_blk);
2637 /* Return here if interrupt is disabled. */
2638 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2641 netif_rx_schedule(dev, &bp->napi);
2647 bnx2_interrupt(int irq, void *dev_instance)
2649 struct net_device *dev = dev_instance;
2650 struct bnx2 *bp = netdev_priv(dev);
2651 struct status_block *sblk = bp->status_blk;
2653 /* When using INTx, it is possible for the interrupt to arrive
2654 * at the CPU before the status block posted prior to the
2655 * interrupt. Reading a register will flush the status block.
2656 * When using MSI, the MSI message will always complete after
2657 * the status block write. */
2659 if ((sblk->status_idx == bp->last_status_idx) &&
2660 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2661 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2664 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2665 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2666 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2668 /* Read back to deassert IRQ immediately to avoid too many
2669 * spurious interrupts. */
2671 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2673 /* Return here if interrupt is shared and is disabled. */
2674 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2677 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2678 bp->last_status_idx = sblk->status_idx;
2679 __netif_rx_schedule(dev, &bp->napi);
2685 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2686 STATUS_ATTN_BITS_TIMER_ABORT)
2689 bnx2_has_work(struct bnx2 *bp)
2691 struct status_block *sblk = bp->status_blk;
2693 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2694 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2697 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2698 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2704 static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
2706 struct status_block *sblk = bp->status_blk;
2707 u32 status_attn_bits = sblk->status_attn_bits;
2708 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2710 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2711 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2715 /* This is needed to take care of transient status
2716 * during link changes. */
2718 REG_WR(bp, BNX2_HC_COMMAND,
2719 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2720 REG_RD(bp, BNX2_HC_COMMAND);
2723 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2726 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
2727 work_done += bnx2_rx_int(bp, budget - work_done);
2732 static int bnx2_poll(struct napi_struct *napi, int budget)
2734 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2736 struct status_block *sblk = bp->status_blk;
2739 work_done = bnx2_poll_work(bp, work_done, budget);
2741 if (unlikely(work_done >= budget))
2744 /* bp->last_status_idx is used below to tell the hw how
2745 * much work has been processed, so we must read it before
2746 * checking for more work. */
2748 bp->last_status_idx = sblk->status_idx;
2750 if (likely(!bnx2_has_work(bp))) {
2751 netif_rx_complete(bp->dev, napi);
2752 if (likely(bp->flags & USING_MSI_FLAG)) {
2753 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2754 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2755 bp->last_status_idx);
2758 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2759 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2760 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2761 bp->last_status_idx);
2763 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2764 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2765 bp->last_status_idx);
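/* Re-arming depends on the interrupt type: with MSI one write with
 * INDEX_VALID updates the index and unmasks in a single step, while
 * with INTx the index is first written with MASK_INT still set and a
 * second write then drops the mask.
 */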
2773 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2774 * from set_multicast. */
2777 bnx2_set_rx_mode(struct net_device *dev)
2779 struct bnx2 *bp = netdev_priv(dev);
2780 u32 rx_mode, sort_mode;
2783 spin_lock_bh(&bp->phy_lock);
2785 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2786 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2787 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2789 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2790 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2792 if (!(bp->flags & ASF_ENABLE_FLAG))
2793 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2795 if (dev->flags & IFF_PROMISC) {
2796 /* Promiscuous mode. */
2797 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2798 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2799 BNX2_RPM_SORT_USER0_PROM_VLAN;
2801 else if (dev->flags & IFF_ALLMULTI) {
2802 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2803 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2806 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2809 /* Accept one or more multicast(s). */
2810 struct dev_mc_list *mclist;
2811 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2816 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2818 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2819 i++, mclist = mclist->next) {
2821 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2823 regidx = (bit & 0xe0) >> 5;
2825 mc_filter[regidx] |= (1 << bit);
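/* The low CRC byte selects one of 256 hash bits: bits 7:5 pick one of
 * the eight 32-bit hash registers and bits 4:0 (after the 0x1f mask)
 * pick the bit within it. For example, a CRC whose low byte is 0x4a
 * maps to register 2, bit 10.
 */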
2828 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2829 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2833 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2836 if (rx_mode != bp->rx_mode) {
2837 bp->rx_mode = rx_mode;
2838 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2841 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2842 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2843 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2845 spin_unlock_bh(&bp->phy_lock);
2849 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2856 for (i = 0; i < rv2p_code_len; i += 8) {
2857 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2859 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2862 if (rv2p_proc == RV2P_PROC1) {
2863 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2864 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2867 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2868 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
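/* Each 64-bit RV2P instruction is staged as two 32-bit halves in
 * INSTR_HIGH/INSTR_LOW, then the ADDR_CMD write commits the pair to
 * instruction word i / 8 of the selected processor.
 */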
2872 /* Reset the processor; it is un-stalled later. */
2873 if (rv2p_proc == RV2P_PROC1) {
2874 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2877 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2882 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2889 val = REG_RD_IND(bp, cpu_reg->mode);
2890 val |= cpu_reg->mode_value_halt;
2891 REG_WR_IND(bp, cpu_reg->mode, val);
2892 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2894 /* Load the Text area. */
2895 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2899 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2904 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2905 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2909 /* Load the Data area. */
2910 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2914 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2915 REG_WR_IND(bp, offset, fw->data[j]);
2919 /* Load the SBSS area. */
2920 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2924 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2925 REG_WR_IND(bp, offset, 0);
2929 /* Load the BSS area. */
2930 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2934 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2935 REG_WR_IND(bp, offset, 0);
2939 /* Load the Read-Only area. */
2940 offset = cpu_reg->spad_base +
2941 (fw->rodata_addr - cpu_reg->mips_view_base);
2945 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2946 REG_WR_IND(bp, offset, fw->rodata[j]);
2950 /* Clear the pre-fetch instruction. */
2951 REG_WR_IND(bp, cpu_reg->inst, 0);
2952 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2954 /* Start the CPU. */
2955 val = REG_RD_IND(bp, cpu_reg->mode);
2956 val &= ~cpu_reg->mode_value_halt;
2957 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2958 REG_WR_IND(bp, cpu_reg->mode, val);
2964 bnx2_init_cpus(struct bnx2 *bp)
2966 struct cpu_reg cpu_reg;
2971 /* Initialize the RV2P processor. */
2972 text = vmalloc(FW_BUF_SIZE);
2975 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2976 rv2p = bnx2_xi_rv2p_proc1;
2977 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
2979 rv2p = bnx2_rv2p_proc1;
2980 rv2p_len = sizeof(bnx2_rv2p_proc1);
2982 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
2986 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2988 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2989 rv2p = bnx2_xi_rv2p_proc2;
2990 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
2992 rv2p = bnx2_rv2p_proc2;
2993 rv2p_len = sizeof(bnx2_rv2p_proc2);
2995 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
2999 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3001 /* Initialize the RX Processor. */
3002 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3003 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3004 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3005 cpu_reg.state = BNX2_RXP_CPU_STATE;
3006 cpu_reg.state_value_clear = 0xffffff;
3007 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3008 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3009 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3010 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3011 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3012 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3013 cpu_reg.mips_view_base = 0x8000000;
3015 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3016 fw = &bnx2_rxp_fw_09;
3018 fw = &bnx2_rxp_fw_06;
3021 rc = load_cpu_fw(bp, &cpu_reg, fw);
3025 /* Initialize the TX Processor. */
3026 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3027 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3028 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3029 cpu_reg.state = BNX2_TXP_CPU_STATE;
3030 cpu_reg.state_value_clear = 0xffffff;
3031 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3032 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3033 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3034 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3035 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3036 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3037 cpu_reg.mips_view_base = 0x8000000;
3039 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3040 fw = &bnx2_txp_fw_09;
3042 fw = &bnx2_txp_fw_06;
3045 rc = load_cpu_fw(bp, &cpu_reg, fw);
3049 /* Initialize the TX Patch-up Processor. */
3050 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3051 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3052 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3053 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3054 cpu_reg.state_value_clear = 0xffffff;
3055 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3056 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3057 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3058 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3059 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3060 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3061 cpu_reg.mips_view_base = 0x8000000;
3063 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3064 fw = &bnx2_tpat_fw_09;
3066 fw = &bnx2_tpat_fw_06;
3069 rc = load_cpu_fw(bp, &cpu_reg, fw);
3073 /* Initialize the Completion Processor. */
3074 cpu_reg.mode = BNX2_COM_CPU_MODE;
3075 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3076 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3077 cpu_reg.state = BNX2_COM_CPU_STATE;
3078 cpu_reg.state_value_clear = 0xffffff;
3079 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3080 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3081 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3082 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3083 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3084 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3085 cpu_reg.mips_view_base = 0x8000000;
3087 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3088 fw = &bnx2_com_fw_09;
3090 fw = &bnx2_com_fw_06;
3093 rc = load_cpu_fw(bp, &cpu_reg, fw);
3097 /* Initialize the Command Processor. */
3098 cpu_reg.mode = BNX2_CP_CPU_MODE;
3099 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3100 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3101 cpu_reg.state = BNX2_CP_CPU_STATE;
3102 cpu_reg.state_value_clear = 0xffffff;
3103 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3104 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3105 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3106 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3107 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3108 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3109 cpu_reg.mips_view_base = 0x8000000;
3111 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3112 fw = &bnx2_cp_fw_09;
3114 fw = &bnx2_cp_fw_06;
3117 rc = load_cpu_fw(bp, &cpu_reg, fw);
3125 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3129 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3135 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3136 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3137 PCI_PM_CTRL_PME_STATUS);
3139 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3140 /* delay required during transition out of D3hot */
3143 val = REG_RD(bp, BNX2_EMAC_MODE);
3144 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3145 val &= ~BNX2_EMAC_MODE_MPKT;
3146 REG_WR(bp, BNX2_EMAC_MODE, val);
3148 val = REG_RD(bp, BNX2_RPM_CONFIG);
3149 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3150 REG_WR(bp, BNX2_RPM_CONFIG, val);
3161 autoneg = bp->autoneg;
3162 advertising = bp->advertising;
3164 if (bp->phy_port == PORT_TP) {
3165 bp->autoneg = AUTONEG_SPEED;
3166 bp->advertising = ADVERTISED_10baseT_Half |
3167 ADVERTISED_10baseT_Full |
3168 ADVERTISED_100baseT_Half |
3169 ADVERTISED_100baseT_Full |
3173 spin_lock_bh(&bp->phy_lock);
3174 bnx2_setup_phy(bp, bp->phy_port);
3175 spin_unlock_bh(&bp->phy_lock);
3177 bp->autoneg = autoneg;
3178 bp->advertising = advertising;
3180 bnx2_set_mac_addr(bp);
3182 val = REG_RD(bp, BNX2_EMAC_MODE);
3184 /* Enable port mode. */
3185 val &= ~BNX2_EMAC_MODE_PORT;
3186 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3187 BNX2_EMAC_MODE_ACPI_RCVD |
3188 BNX2_EMAC_MODE_MPKT;
3189 if (bp->phy_port == PORT_TP)
3190 val |= BNX2_EMAC_MODE_PORT_MII;
3192 val |= BNX2_EMAC_MODE_PORT_GMII;
3193 if (bp->line_speed == SPEED_2500)
3194 val |= BNX2_EMAC_MODE_25G_MODE;
3197 REG_WR(bp, BNX2_EMAC_MODE, val);
3199 /* receive all multicast */
3200 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3201 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3204 REG_WR(bp, BNX2_EMAC_RX_MODE,
3205 BNX2_EMAC_RX_MODE_SORT_MODE);
3207 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3208 BNX2_RPM_SORT_USER0_MC_EN;
3209 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3210 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3211 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3212 BNX2_RPM_SORT_USER0_ENA);
3214 /* Need to enable EMAC and RPM for WOL. */
3215 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3216 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3217 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3218 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3220 val = REG_RD(bp, BNX2_RPM_CONFIG);
3221 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3222 REG_WR(bp, BNX2_RPM_CONFIG, val);
3224 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3227 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3230 if (!(bp->flags & NO_WOL_FLAG))
3231 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3233 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3234 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3235 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3244 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3246 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3249 /* No more memory access after this point until
3250 * device is brought back to D0. */
3262 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3267 /* Request access to the flash interface. */
3268 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3269 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3270 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3271 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3277 if (j >= NVRAM_TIMEOUT_COUNT)
3284 bnx2_release_nvram_lock(struct bnx2 *bp)
3289 /* Relinquish nvram interface. */
3290 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3292 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3293 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3294 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3300 if (j >= NVRAM_TIMEOUT_COUNT)
3308 bnx2_enable_nvram_write(struct bnx2 *bp)
3312 val = REG_RD(bp, BNX2_MISC_CFG);
3313 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3315 if (bp->flash_info->flags & BNX2_NV_WREN) {
3318 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3319 REG_WR(bp, BNX2_NVM_COMMAND,
3320 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3322 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3325 val = REG_RD(bp, BNX2_NVM_COMMAND);
3326 if (val & BNX2_NVM_COMMAND_DONE)
3330 if (j >= NVRAM_TIMEOUT_COUNT)
3337 bnx2_disable_nvram_write(struct bnx2 *bp)
3341 val = REG_RD(bp, BNX2_MISC_CFG);
3342 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3347 bnx2_enable_nvram_access(struct bnx2 *bp)
3351 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3352 /* Enable both bits, even on read. */
3353 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3354 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3358 bnx2_disable_nvram_access(struct bnx2 *bp)
3362 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3363 /* Disable both bits, even after read. */
3364 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3365 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3366 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3370 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3375 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3376 /* Buffered flash, no erase needed */
3379 /* Build an erase command */
3380 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3381 BNX2_NVM_COMMAND_DOIT;
3383 /* Need to clear DONE bit separately. */
3384 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3386 /* Address of the NVRAM page to erase. */
3387 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3389 /* Issue an erase command. */
3390 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3392 /* Wait for completion. */
3393 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3398 val = REG_RD(bp, BNX2_NVM_COMMAND);
3399 if (val & BNX2_NVM_COMMAND_DONE)
3403 if (j >= NVRAM_TIMEOUT_COUNT)
3410 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3415 /* Build the command word. */
3416 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3418 /* Translate the offset for buffered flash; not needed on the 5709. */
3419 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3420 offset = ((offset / bp->flash_info->page_size) <<
3421 bp->flash_info->page_bits) +
3422 (offset % bp->flash_info->page_size);
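/* For illustration, assuming a buffered part with 264-byte pages and
 * page_bits = 9: linear offset 600 is page 2, byte 72, so the
 * translated address is (2 << 9) + 72 = 0x448.
 */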
3425 /* Need to clear DONE bit separately. */
3426 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3428 /* Address of the NVRAM to read from. */
3429 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3431 /* Issue a read command. */
3432 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3434 /* Wait for completion. */
3435 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3440 val = REG_RD(bp, BNX2_NVM_COMMAND);
3441 if (val & BNX2_NVM_COMMAND_DONE) {
3442 val = REG_RD(bp, BNX2_NVM_READ);
3444 val = be32_to_cpu(val);
3445 memcpy(ret_val, &val, 4);
3449 if (j >= NVRAM_TIMEOUT_COUNT)
3457 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3462 /* Build the command word. */
3463 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3465 /* Translate the offset for buffered flash; not needed on the 5709. */
3466 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3467 offset = ((offset / bp->flash_info->page_size) <<
3468 bp->flash_info->page_bits) +
3469 (offset % bp->flash_info->page_size);
3472 /* Need to clear DONE bit separately. */
3473 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3475 memcpy(&val32, val, 4);
3476 val32 = cpu_to_be32(val32);
3478 /* Write the data. */
3479 REG_WR(bp, BNX2_NVM_WRITE, val32);
3481 /* Address of the NVRAM to write to. */
3482 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3484 /* Issue the write command. */
3485 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3487 /* Wait for completion. */
3488 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3491 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3494 if (j >= NVRAM_TIMEOUT_COUNT)
3501 bnx2_init_nvram(struct bnx2 *bp)
3504 int j, entry_count, rc = 0;
3505 struct flash_spec *flash;
3507 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3508 bp->flash_info = &flash_5709;
3509 goto get_flash_size;
3512 /* Determine the selected interface. */
3513 val = REG_RD(bp, BNX2_NVM_CFG1);
3515 entry_count = ARRAY_SIZE(flash_table);
3517 if (val & 0x40000000) {
3519 /* Flash interface has been reconfigured */
3520 for (j = 0, flash = &flash_table[0]; j < entry_count;
3522 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3523 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3524 bp->flash_info = flash;
3531 /* Not yet been reconfigured */
3533 if (val & (1 << 23))
3534 mask = FLASH_BACKUP_STRAP_MASK;
3536 mask = FLASH_STRAP_MASK;
3538 for (j = 0, flash = &flash_table[0]; j < entry_count;
3541 if ((val & mask) == (flash->strapping & mask)) {
3542 bp->flash_info = flash;
3544 /* Request access to the flash interface. */
3545 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3548 /* Enable access to flash interface */
3549 bnx2_enable_nvram_access(bp);
3551 /* Reconfigure the flash interface */
3552 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3553 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3554 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3555 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3557 /* Disable access to flash interface */
3558 bnx2_disable_nvram_access(bp);
3559 bnx2_release_nvram_lock(bp);
3564 } /* if (val & 0x40000000) */
3566 if (j == entry_count) {
3567 bp->flash_info = NULL;
3568 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3573 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3574 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3576 bp->flash_size = val;
3578 bp->flash_size = bp->flash_info->total_size;
3584 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3588 u32 cmd_flags, offset32, len32, extra;
3593 /* Request access to the flash interface. */
3594 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3597 /* Enable access to flash interface */
3598 bnx2_enable_nvram_access(bp);
3611 pre_len = 4 - (offset & 3);
3613 if (pre_len >= len32) {
3615 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3616 BNX2_NVM_COMMAND_LAST;
3619 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3622 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3627 memcpy(ret_buf, buf + (offset & 3), pre_len);
3634 extra = 4 - (len32 & 3);
3635 len32 = (len32 + 4) & ~3;
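/* NVM access is dword based, so ragged edges are fixed up here. For
 * example, a 10-byte read at offset 5 takes pre_len = 3 bytes from
 * the dword at offset 4; the remaining 7 bytes round up to two more
 * dwords with extra = 1 trailing byte discarded from the last one.
 */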
3642 cmd_flags = BNX2_NVM_COMMAND_LAST;
3644 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3645 BNX2_NVM_COMMAND_LAST;
3647 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3649 memcpy(ret_buf, buf, 4 - extra);
3651 else if (len32 > 0) {
3654 /* Read the first word. */
3658 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3660 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3662 /* Advance to the next dword. */
3667 while (len32 > 4 && rc == 0) {
3668 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3670 /* Advance to the next dword. */
3679 cmd_flags = BNX2_NVM_COMMAND_LAST;
3680 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3682 memcpy(ret_buf, buf, 4 - extra);
3685 /* Disable access to flash interface */
3686 bnx2_disable_nvram_access(bp);
3688 bnx2_release_nvram_lock(bp);
3694 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3697 u32 written, offset32, len32;
3698 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3700 int align_start, align_end;
3705 align_start = align_end = 0;
3707 if ((align_start = (offset32 & 3))) {
3709 len32 += align_start;
3712 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3717 align_end = 4 - (len32 & 3);
3719 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3723 if (align_start || align_end) {
3724 align_buf = kmalloc(len32, GFP_KERNEL);
3725 if (align_buf == NULL)
3728 memcpy(align_buf, start, 4);
3731 memcpy(align_buf + len32 - 4, end, 4);
3733 memcpy(align_buf + align_start, data_buf, buf_size);
3737 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3738 flash_buffer = kmalloc(264, GFP_KERNEL);
3739 if (flash_buffer == NULL) {
3741 goto nvram_write_end;
3746 while ((written < len32) && (rc == 0)) {
3747 u32 page_start, page_end, data_start, data_end;
3748 u32 addr, cmd_flags;
3751 /* Find the page_start addr */
3752 page_start = offset32 + written;
3753 page_start -= (page_start % bp->flash_info->page_size);
3754 /* Find the page_end addr */
3755 page_end = page_start + bp->flash_info->page_size;
3756 /* Find the data_start addr */
3757 data_start = (written == 0) ? offset32 : page_start;
3758 /* Find the data_end addr */
3759 data_end = (page_end > offset32 + len32) ?
3760 (offset32 + len32) : page_end;
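/* Worked example, assuming 264-byte pages: writing 0x30 bytes at
 * offset 0x10a gives page_start = 0x108, page_end = 0x210,
 * data_start = 0x10a and data_end = 0x13a; the untouched head and
 * tail of the page are restored around the new data below.
 */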
3762 /* Request access to the flash interface. */
3763 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3764 goto nvram_write_end;
3766 /* Enable access to flash interface */
3767 bnx2_enable_nvram_access(bp);
3769 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3770 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3773 /* Read the whole page into the buffer
3774 * (non-buffered flash only) */
3775 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3776 if (j == (bp->flash_info->page_size - 4)) {
3777 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3779 rc = bnx2_nvram_read_dword(bp,
3785 goto nvram_write_end;
3791 /* Enable writes to flash interface (unlock write-protect) */
3792 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3793 goto nvram_write_end;
3795 /* Loop to write back the buffer data from page_start to data_start. */
3798 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3799 /* Erase the page */
3800 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3801 goto nvram_write_end;
3803 /* Re-enable the write again for the actual write */
3804 bnx2_enable_nvram_write(bp);
3806 for (addr = page_start; addr < data_start;
3807 addr += 4, i += 4) {
3809 rc = bnx2_nvram_write_dword(bp, addr,
3810 &flash_buffer[i], cmd_flags);
3813 goto nvram_write_end;
3819 /* Loop to write the new data from data_start to data_end */
3820 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3821 if ((addr == page_end - 4) ||
3822 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3823 (addr == data_end - 4))) {
3825 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3827 rc = bnx2_nvram_write_dword(bp, addr, buf,
3831 goto nvram_write_end;
3837 /* Loop to write back the buffer data from data_end to page_end. */
3839 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3840 for (addr = data_end; addr < page_end;
3841 addr += 4, i += 4) {
3843 if (addr == page_end-4) {
3844 cmd_flags = BNX2_NVM_COMMAND_LAST;
3846 rc = bnx2_nvram_write_dword(bp, addr,
3847 &flash_buffer[i], cmd_flags);
3850 goto nvram_write_end;
3856 /* Disable writes to flash interface (lock write-protect) */
3857 bnx2_disable_nvram_write(bp);
3859 /* Disable access to flash interface */
3860 bnx2_disable_nvram_access(bp);
3861 bnx2_release_nvram_lock(bp);
3863 /* Increment written */
3864 written += data_end - data_start;
3868 kfree(flash_buffer);
3874 bnx2_init_remote_phy(struct bnx2 *bp)
3878 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3879 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3882 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3883 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3886 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3887 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3889 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3890 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3891 bp->phy_port = PORT_FIBRE;
3893 bp->phy_port = PORT_TP;
3895 if (netif_running(bp->dev)) {
3898 if (val & BNX2_LINK_STATUS_LINK_UP) {
3900 netif_carrier_on(bp->dev);
3903 netif_carrier_off(bp->dev);
3905 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3906 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3907 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3914 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3920 /* Wait for the current PCI transaction to complete before
3921 * issuing a reset. */
3922 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3923 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3924 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3925 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3926 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3927 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3930 /* Wait for the firmware to tell us it is ok to issue a reset. */
3931 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3933 /* Deposit a driver reset signature so the firmware knows that
3934 * this is a soft reset. */
3935 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3936 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3938 /* Do a dummy read to force the chip to complete all outstanding transactions
3939 * before we issue a reset. */
3940 val = REG_RD(bp, BNX2_MISC_ID);
3942 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3943 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3944 REG_RD(bp, BNX2_MISC_COMMAND);
3947 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3948 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3950 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3953 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3954 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3955 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3958 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3960 /* Reading back any register after chip reset will hang the
3961 * bus on 5706 A0 and A1. The msleep below provides plenty
3962 * of margin for write posting. */
3964 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3965 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3968 /* Reset takes approximately 30 usec */
3969 for (i = 0; i < 10; i++) {
3970 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3971 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3972 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3977 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3978 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3979 printk(KERN_ERR PFX "Chip reset did not complete\n");
3984 /* Make sure byte swapping is properly configured. */
3985 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3986 if (val != 0x01020304) {
3987 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3991 /* Wait for the firmware to finish its initialization. */
3992 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3996 spin_lock_bh(&bp->phy_lock);
3997 old_port = bp->phy_port;
3998 bnx2_init_remote_phy(bp);
3999 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4000 bnx2_set_default_remote_link(bp);
4001 spin_unlock_bh(&bp->phy_lock);
4003 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4004 /* Adjust the voltage regulator two steps lower. The default
4005 * of this register is 0x0000000e. */
4006 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4008 /* Remove bad rbuf memory from the free pool. */
4009 rc = bnx2_alloc_bad_rbuf(bp);
4016 bnx2_init_chip(struct bnx2 *bp)
4021 /* Make sure the interrupt is not active. */
4022 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4024 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4025 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4027 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4029 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4030 DMA_READ_CHANS << 12 |
4031 DMA_WRITE_CHANS << 16;
4033 val |= (0x2 << 20) | (1 << 11);
4035 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4038 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4039 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4040 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4042 REG_WR(bp, BNX2_DMA_CONFIG, val);
4044 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4045 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4046 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4047 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4050 if (bp->flags & PCIX_FLAG) {
4053 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4055 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4056 val16 & ~PCI_X_CMD_ERO);
4059 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4060 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4061 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4062 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4064 /* Initialize context mapping and zero out the quick contexts. The
4065 * context block must have already been enabled. */
4066 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4067 rc = bnx2_init_5709_context(bp);
4071 bnx2_init_context(bp);
4073 if ((rc = bnx2_init_cpus(bp)) != 0)
4076 bnx2_init_nvram(bp);
4078 bnx2_set_mac_addr(bp);
4080 val = REG_RD(bp, BNX2_MQ_CONFIG);
4081 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4082 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4083 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4084 val |= BNX2_MQ_CONFIG_HALT_DIS;
4086 REG_WR(bp, BNX2_MQ_CONFIG, val);
4088 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4089 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4090 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4092 val = (BCM_PAGE_BITS - 8) << 24;
4093 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4095 /* Configure page size. */
4096 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4097 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4098 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4099 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4101 val = bp->mac_addr[0] +
4102 (bp->mac_addr[1] << 8) +
4103 (bp->mac_addr[2] << 16) +
4105 (bp->mac_addr[4] << 8) +
4106 (bp->mac_addr[5] << 16);
4107 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
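/* Seeding the backoff generator from all six MAC address bytes gives
 * each station a different starting point for the half-duplex random
 * backoff sequence.
 */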
4109 /* Program the MTU. Also include 4 bytes for CRC32. */
4110 val = bp->dev->mtu + ETH_HLEN + 4;
4111 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4112 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4113 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4115 bp->last_status_idx = 0;
4116 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4118 /* Set up how to generate a link change interrupt. */
4119 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4121 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4122 (u64) bp->status_blk_mapping & 0xffffffff);
4123 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4125 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4126 (u64) bp->stats_blk_mapping & 0xffffffff);
4127 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4128 (u64) bp->stats_blk_mapping >> 32);
4130 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4131 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4133 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4134 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4136 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4137 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4139 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4141 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4143 REG_WR(bp, BNX2_HC_COM_TICKS,
4144 (bp->com_ticks_int << 16) | bp->com_ticks);
4146 REG_WR(bp, BNX2_HC_CMD_TICKS,
4147 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4149 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4150 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4152 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4153 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4155 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4156 val = BNX2_HC_CONFIG_COLLECT_STATS;
4158 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4159 BNX2_HC_CONFIG_COLLECT_STATS;
4162 if (bp->flags & ONE_SHOT_MSI_FLAG)
4163 val |= BNX2_HC_CONFIG_ONE_SHOT;
4165 REG_WR(bp, BNX2_HC_CONFIG, val);
4167 /* Clear internal stats counters. */
4168 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4170 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4172 /* Initialize the receive filter. */
4173 bnx2_set_rx_mode(bp->dev);
4175 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4176 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4177 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4178 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4180 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4183 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4184 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4188 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4194 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4196 u32 val, offset0, offset1, offset2, offset3;
4198 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4199 offset0 = BNX2_L2CTX_TYPE_XI;
4200 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4201 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4202 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4204 offset0 = BNX2_L2CTX_TYPE;
4205 offset1 = BNX2_L2CTX_CMD_TYPE;
4206 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4207 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4209 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4210 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4212 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4213 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4215 val = (u64) bp->tx_desc_mapping >> 32;
4216 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4218 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4219 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4223 bnx2_init_tx_ring(struct bnx2 *bp)
4228 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4230 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4232 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4233 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4238 bp->tx_prod_bseq = 0;
4241 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4242 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4244 bnx2_init_tx_context(bp, cid);
4248 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4254 for (i = 0; i < num_rings; i++) {
4257 rxbd = &rx_ring[i][0];
4258 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4259 rxbd->rx_bd_len = buf_size;
4260 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4262 if (i == (num_rings - 1))
4266 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4267 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
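/* The final BD of each page holds no buffer: its address words point
 * at the next page (wrapping to the first page on the last ring),
 * chaining the pages into one logical ring.
 */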
4272 bnx2_init_rx_ring(struct bnx2 *bp)
4275 u16 prod, ring_prod;
4276 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4280 bp->rx_prod_bseq = 0;
4284 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4285 bp->rx_buf_use_size, bp->rx_max_ring);
4287 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4288 if (bp->rx_pg_ring_size) {
4289 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4290 bp->rx_pg_desc_mapping,
4291 PAGE_SIZE, bp->rx_max_pg_ring);
4292 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4293 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4294 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4295 BNX2_L2CTX_RBDC_JUMBO_KEY);
4297 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4298 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4300 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4301 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4303 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4304 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4307 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4308 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4310 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4312 val = (u64) bp->rx_desc_mapping[0] >> 32;
4313 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4315 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4316 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4318 ring_prod = prod = bp->rx_pg_prod;
4319 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4320 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4322 prod = NEXT_RX_BD(prod);
4323 ring_prod = RX_PG_RING_IDX(prod);
4325 bp->rx_pg_prod = prod;
4327 ring_prod = prod = bp->rx_prod;
4328 for (i = 0; i < bp->rx_ring_size; i++) {
4329 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4332 prod = NEXT_RX_BD(prod);
4333 ring_prod = RX_RING_IDX(prod);
4337 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
4338 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4340 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
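/* Work out how many ring pages a request needs and round the count up
 * to a power of two, capped at max_size. For example, with 255 usable
 * descriptors per page, 600 buffers need three pages, which rounds up
 * to four rings.
 */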
4343 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4345 u32 max, num_rings = 1;
4347 while (ring_size > MAX_RX_DESC_CNT) {
4348 ring_size -= MAX_RX_DESC_CNT;
4351 /* round to next power of 2 */
4353 while ((max & num_rings) == 0)
4356 if (num_rings != max)
4363 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4367 /* 8 for CRC and VLAN */
4368 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4370 bp->rx_copy_thresh = RX_COPY_THRESH;
4371 bp->rx_pg_ring_size = 0;
4372 bp->rx_max_pg_ring = 0;
4373 bp->rx_max_pg_ring_idx = 0;
4375 bp->rx_buf_use_size = rx_size;
4377 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4378 bp->rx_ring_size = size;
4379 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4380 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4384 bnx2_free_tx_skbs(struct bnx2 *bp)
4388 if (bp->tx_buf_ring == NULL)
4391 for (i = 0; i < TX_DESC_CNT; ) {
4392 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4393 struct sk_buff *skb = tx_buf->skb;
4401 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4402 skb_headlen(skb), PCI_DMA_TODEVICE);
4406 last = skb_shinfo(skb)->nr_frags;
4407 for (j = 0; j < last; j++) {
4408 tx_buf = &bp->tx_buf_ring[i + j + 1];
4409 pci_unmap_page(bp->pdev,
4410 pci_unmap_addr(tx_buf, mapping),
4411 skb_shinfo(skb)->frags[j].size,
4421 bnx2_free_rx_skbs(struct bnx2 *bp)
4425 if (bp->rx_buf_ring == NULL)
4428 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4429 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4430 struct sk_buff *skb = rx_buf->skb;
4435 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4436 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4442 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4443 bnx2_free_rx_page(bp, i);
4447 bnx2_free_skbs(struct bnx2 *bp)
4449 bnx2_free_tx_skbs(bp);
4450 bnx2_free_rx_skbs(bp);
4454 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4458 rc = bnx2_reset_chip(bp, reset_code);
4463 if ((rc = bnx2_init_chip(bp)) != 0)
4466 bnx2_init_tx_ring(bp);
4467 bnx2_init_rx_ring(bp);
4472 bnx2_init_nic(struct bnx2 *bp)
4476 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4479 spin_lock_bh(&bp->phy_lock);
4482 spin_unlock_bh(&bp->phy_lock);
4487 bnx2_test_registers(struct bnx2 *bp)
4491 static const struct {
4494 #define BNX2_FL_NOT_5709 1
4498 { 0x006c, 0, 0x00000000, 0x0000003f },
4499 { 0x0090, 0, 0xffffffff, 0x00000000 },
4500 { 0x0094, 0, 0x00000000, 0x00000000 },
4502 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4503 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4504 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4505 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4506 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4507 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4508 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4509 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4510 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4512 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4513 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4514 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4515 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4516 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4517 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4519 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4520 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4521 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4523 { 0x1000, 0, 0x00000000, 0x00000001 },
4524 { 0x1004, 0, 0x00000000, 0x000f0001 },
4526 { 0x1408, 0, 0x01c00800, 0x00000000 },
4527 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4528 { 0x14a8, 0, 0x00000000, 0x000001ff },
4529 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4530 { 0x14b0, 0, 0x00000002, 0x00000001 },
4531 { 0x14b8, 0, 0x00000000, 0x00000000 },
4532 { 0x14c0, 0, 0x00000000, 0x00000009 },
4533 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4534 { 0x14cc, 0, 0x00000000, 0x00000001 },
4535 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4537 { 0x1800, 0, 0x00000000, 0x00000001 },
4538 { 0x1804, 0, 0x00000000, 0x00000003 },
4540 { 0x2800, 0, 0x00000000, 0x00000001 },
4541 { 0x2804, 0, 0x00000000, 0x00003f01 },
4542 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4543 { 0x2810, 0, 0xffff0000, 0x00000000 },
4544 { 0x2814, 0, 0xffff0000, 0x00000000 },
4545 { 0x2818, 0, 0xffff0000, 0x00000000 },
4546 { 0x281c, 0, 0xffff0000, 0x00000000 },
4547 { 0x2834, 0, 0xffffffff, 0x00000000 },
4548 { 0x2840, 0, 0x00000000, 0xffffffff },
4549 { 0x2844, 0, 0x00000000, 0xffffffff },
4550 { 0x2848, 0, 0xffffffff, 0x00000000 },
4551 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4553 { 0x2c00, 0, 0x00000000, 0x00000011 },
4554 { 0x2c04, 0, 0x00000000, 0x00030007 },
4556 { 0x3c00, 0, 0x00000000, 0x00000001 },
4557 { 0x3c04, 0, 0x00000000, 0x00070000 },
4558 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4559 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4560 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4561 { 0x3c14, 0, 0x00000000, 0xffffffff },
4562 { 0x3c18, 0, 0x00000000, 0xffffffff },
4563 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4564 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4566 { 0x5004, 0, 0x00000000, 0x0000007f },
4567 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4569 { 0x5c00, 0, 0x00000000, 0x00000001 },
4570 { 0x5c04, 0, 0x00000000, 0x0003000f },
4571 { 0x5c08, 0, 0x00000003, 0x00000000 },
4572 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4573 { 0x5c10, 0, 0x00000000, 0xffffffff },
4574 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4575 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4576 { 0x5c88, 0, 0x00000000, 0x00077373 },
4577 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4579 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4580 { 0x680c, 0, 0xffffffff, 0x00000000 },
4581 { 0x6810, 0, 0xffffffff, 0x00000000 },
4582 { 0x6814, 0, 0xffffffff, 0x00000000 },
4583 { 0x6818, 0, 0xffffffff, 0x00000000 },
4584 { 0x681c, 0, 0xffffffff, 0x00000000 },
4585 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4586 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4587 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4588 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4589 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4590 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4591 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4592 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4593 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4594 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4595 { 0x684c, 0, 0xffffffff, 0x00000000 },
4596 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4597 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4598 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4599 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4600 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4601 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4603 { 0xffff, 0, 0x00000000, 0x00000000 },
4608 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4611 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4612 u32 offset, rw_mask, ro_mask, save_val, val;
4613 u16 flags = reg_tbl[i].flags;
4615 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4618 offset = (u32) reg_tbl[i].offset;
4619 rw_mask = reg_tbl[i].rw_mask;
4620 ro_mask = reg_tbl[i].ro_mask;
4622 save_val = readl(bp->regview + offset);
4624 writel(0, bp->regview + offset);
4626 val = readl(bp->regview + offset);
4627 if ((val & rw_mask) != 0) {
4631 if ((val & ro_mask) != (save_val & ro_mask)) {
4635 writel(0xffffffff, bp->regview + offset);
4637 val = readl(bp->regview + offset);
4638 if ((val & rw_mask) != rw_mask) {
4642 if ((val & ro_mask) != (save_val & ro_mask)) {
4646 writel(save_val, bp->regview + offset);
4650 writel(save_val, bp->regview + offset);
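/* Each register is probed twice: a write of 0 must read back with the
 * read-write bits clear and the read-only bits unchanged, and a write
 * of 0xffffffff must set every read-write bit, again leaving the
 * read-only bits alone; save_val is restored in all cases.
 */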
4658 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4660 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4661 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4664 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4667 for (offset = 0; offset < size; offset += 4) {
4669 REG_WR_IND(bp, start + offset, test_pattern[i]);
4671 if (REG_RD_IND(bp, start + offset) !=
4681 bnx2_test_memory(struct bnx2 *bp)
4685 static struct mem_entry {
4688 } mem_tbl_5706[] = {
4689 { 0x60000, 0x4000 },
4690 { 0xa0000, 0x3000 },
4691 { 0xe0000, 0x4000 },
4692 { 0x120000, 0x4000 },
4693 { 0x1a0000, 0x4000 },
4694 { 0x160000, 0x4000 },
4698 { 0x60000, 0x4000 },
4699 { 0xa0000, 0x3000 },
4700 { 0xe0000, 0x4000 },
4701 { 0x120000, 0x4000 },
4702 { 0x1a0000, 0x4000 },
4705 struct mem_entry *mem_tbl;
4707 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4708 mem_tbl = mem_tbl_5709;
4710 mem_tbl = mem_tbl_5706;
4712 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4713 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4714 mem_tbl[i].len)) != 0) {
4722 #define BNX2_MAC_LOOPBACK 0
4723 #define BNX2_PHY_LOOPBACK 1
4726 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4728 unsigned int pkt_size, num_pkts, i;
4729 struct sk_buff *skb, *rx_skb;
4730 unsigned char *packet;
4731 u16 rx_start_idx, rx_idx;
4734 struct sw_bd *rx_buf;
4735 struct l2_fhdr *rx_hdr;
4738 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4739 bp->loopback = MAC_LOOPBACK;
4740 bnx2_set_mac_loopback(bp);
4742 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4743 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4746 bp->loopback = PHY_LOOPBACK;
4747 bnx2_set_phy_loopback(bp);
4753 skb = netdev_alloc_skb(bp->dev, pkt_size);
4756 packet = skb_put(skb, pkt_size);
4757 memcpy(packet, bp->dev->dev_addr, 6);
4758 memset(packet + 6, 0x0, 8);
4759 for (i = 14; i < pkt_size; i++)
4760 packet[i] = (unsigned char) (i & 0xff);
4762 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4765 REG_WR(bp, BNX2_HC_COMMAND,
4766 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4768 REG_RD(bp, BNX2_HC_COMMAND);
4771 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4775 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4777 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4778 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4779 txbd->tx_bd_mss_nbytes = pkt_size;
4780 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4783 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4784 bp->tx_prod_bseq += pkt_size;
4786 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4787 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4791 REG_WR(bp, BNX2_HC_COMMAND,
4792 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4794 REG_RD(bp, BNX2_HC_COMMAND);
4798 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4801 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4802 goto loopback_test_done;
4805 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4806 if (rx_idx != rx_start_idx + num_pkts) {
4807 goto loopback_test_done;
4810 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4811 rx_skb = rx_buf->skb;
4813 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4814 skb_reserve(rx_skb, bp->rx_offset);
4816 pci_dma_sync_single_for_cpu(bp->pdev,
4817 pci_unmap_addr(rx_buf, mapping),
4818 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4820 if (rx_hdr->l2_fhdr_status &
4821 (L2_FHDR_ERRORS_BAD_CRC |
4822 L2_FHDR_ERRORS_PHY_DECODE |
4823 L2_FHDR_ERRORS_ALIGNMENT |
4824 L2_FHDR_ERRORS_TOO_SHORT |
4825 L2_FHDR_ERRORS_GIANT_FRAME)) {
4827 goto loopback_test_done;
4830 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4831 goto loopback_test_done;
4834 for (i = 14; i < pkt_size; i++) {
4835 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4836 goto loopback_test_done;
4847 #define BNX2_MAC_LOOPBACK_FAILED 1
4848 #define BNX2_PHY_LOOPBACK_FAILED 2
4849 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4850 BNX2_PHY_LOOPBACK_FAILED)
4853 bnx2_test_loopback(struct bnx2 *bp)
4857 if (!netif_running(bp->dev))
4858 return BNX2_LOOPBACK_FAILED;
4860 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4861 spin_lock_bh(&bp->phy_lock);
4863 spin_unlock_bh(&bp->phy_lock);
4864 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4865 rc |= BNX2_MAC_LOOPBACK_FAILED;
4866 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4867 rc |= BNX2_PHY_LOOPBACK_FAILED;
4871 #define NVRAM_SIZE 0x200
4872 #define CRC32_RESIDUAL 0xdebb20e3
4875 bnx2_test_nvram(struct bnx2 *bp)
4877 u32 buf[NVRAM_SIZE / 4];
4878 u8 *data = (u8 *) buf;
4882 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4883 goto test_nvram_done;
4885 magic = be32_to_cpu(buf[0]);
4886 if (magic != 0x669955aa) {
4888 goto test_nvram_done;
4891 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4892 goto test_nvram_done;
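/* Each 0x100-byte half carries its own CRC32, so running the CRC over
 * a whole half, stored checksum included, must leave the fixed
 * residual 0xdebb20e3 when the data is intact.
 */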
4894 csum = ether_crc_le(0x100, data);
4895 if (csum != CRC32_RESIDUAL) {
4897 goto test_nvram_done;
4900 csum = ether_crc_le(0x100, data + 0x100);
4901 if (csum != CRC32_RESIDUAL) {
4910 bnx2_test_link(struct bnx2 *bp)
4914 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4919 spin_lock_bh(&bp->phy_lock);
4920 bnx2_enable_bmsr1(bp);
4921 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4922 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4923 bnx2_disable_bmsr1(bp);
4924 spin_unlock_bh(&bp->phy_lock);
4926 if (bmsr & BMSR_LSTATUS) {
4933 bnx2_test_intr(struct bnx2 *bp)
4938 if (!netif_running(bp->dev))
4941 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4943 /* This register is not touched during run-time. */
4944 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4945 REG_RD(bp, BNX2_HC_COMMAND);
4947 for (i = 0; i < 10; i++) {
4948 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4954 msleep_interruptible(10);
4963 bnx2_5706_serdes_timer(struct bnx2 *bp)
4965 spin_lock(&bp->phy_lock);
4966 if (bp->serdes_an_pending)
4967 bp->serdes_an_pending--;
4968 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4971 bp->current_interval = bp->timer_interval;
4973 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4975 if (bmcr & BMCR_ANENABLE) {
4978 bnx2_write_phy(bp, 0x1c, 0x7c00);
4979 bnx2_read_phy(bp, 0x1c, &phy1);
4981 bnx2_write_phy(bp, 0x17, 0x0f01);
4982 bnx2_read_phy(bp, 0x15, &phy2);
4983 bnx2_write_phy(bp, 0x17, 0x0f01);
4984 bnx2_read_phy(bp, 0x15, &phy2);
4986 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4987 !(phy2 & 0x20)) { /* no CONFIG */
4989 bmcr &= ~BMCR_ANENABLE;
4990 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4991 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4992 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4996 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4997 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5000 bnx2_write_phy(bp, 0x17, 0x0f01);
5001 bnx2_read_phy(bp, 0x15, &phy2);
5005 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5006 bmcr |= BMCR_ANENABLE;
5007 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5009 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5012 bp->current_interval = bp->timer_interval;
5014 spin_unlock(&bp->phy_lock);
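/* 5708 SerDes link recovery: on 2.5G-capable parts the timer below
 * alternates between forcing 2.5G and restarting autonegotiation while
 * the link is down; serdes_an_pending = 2 gives each autoneg attempt
 * two timer intervals before the next forced try.
 */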
5018 bnx2_5708_serdes_timer(struct bnx2 *bp)
5020 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5023 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5024 bp->serdes_an_pending = 0;
5028 spin_lock(&bp->phy_lock);
5029 if (bp->serdes_an_pending)
5030 bp->serdes_an_pending--;
5031 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5034 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5035 if (bmcr & BMCR_ANENABLE) {
5036 bnx2_enable_forced_2g5(bp);
5037 bp->current_interval = SERDES_FORCED_TIMEOUT;
5039 bnx2_disable_forced_2g5(bp);
5040 bp->serdes_an_pending = 2;
5041 bp->current_interval = bp->timer_interval;
5045 bp->current_interval = bp->timer_interval;
5047 spin_unlock(&bp->phy_lock);
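/* Periodic housekeeping, normally once per second: send the
 * driver-alive heartbeat to the bootcode, refresh the firmware rx-drop
 * counter via an indirect register read, apply the 5708 statistics
 * workaround, and run the SerDes state machines above.  The timer
 * re-arms itself with bp->current_interval, which the SerDes code
 * shortens while hunting for link.
 */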
5051 bnx2_timer(unsigned long data)
5053 struct bnx2 *bp = (struct bnx2 *) data;
5055 if (!netif_running(bp->dev))
5058 if (atomic_read(&bp->intr_sem) != 0)
5059 goto bnx2_restart_timer;
5061 bnx2_send_heart_beat(bp);
5063 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5065 /* workaround occasional corrupted counters */
5066 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5067 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5068 BNX2_HC_COMMAND_STATS_NOW);
5070 if (bp->phy_flags & PHY_SERDES_FLAG) {
5071 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5072 bnx2_5706_serdes_timer(bp);
5074 bnx2_5708_serdes_timer(bp);
5078 mod_timer(&bp->timer, jiffies + bp->current_interval);
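/* MSI is requested unshared (flags 0) since the vector belongs to this
 * device alone; the 5709 uses the one-shot MSI handler.  Legacy INTx
 * must be requested with IRQF_SHARED because the line may be shared
 * with other devices.
 */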
5082 bnx2_request_irq(struct bnx2 *bp)
5084 struct net_device *dev = bp->dev;
5087 if (bp->flags & USING_MSI_FLAG) {
5088 irq_handler_t fn = bnx2_msi;
5090 if (bp->flags & ONE_SHOT_MSI_FLAG)
5091 fn = bnx2_msi_1shot;
5093 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5095 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5096 IRQF_SHARED, dev->name, dev);
5101 bnx2_free_irq(struct bnx2 *bp)
5103 struct net_device *dev = bp->dev;
5105 if (bp->flags & USING_MSI_FLAG) {
5106 free_irq(bp->pdev->irq, dev);
5107 pci_disable_msi(bp->pdev);
5108 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5110 free_irq(bp->pdev->irq, dev);
5113 /* Called with rtnl_lock */
5115 bnx2_open(struct net_device *dev)
5117 struct bnx2 *bp = netdev_priv(dev);
5120 netif_carrier_off(dev);
5122 bnx2_set_power_state(bp, PCI_D0);
5123 bnx2_disable_int(bp);
5125 rc = bnx2_alloc_mem(bp);
5129 napi_enable(&bp->napi);
5131 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5132 if (pci_enable_msi(bp->pdev) == 0) {
5133 bp->flags |= USING_MSI_FLAG;
5134 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5135 bp->flags |= ONE_SHOT_MSI_FLAG;
5138 rc = bnx2_request_irq(bp);
5141 napi_disable(&bp->napi);
5146 rc = bnx2_init_nic(bp);
5149 napi_disable(&bp->napi);
5156 mod_timer(&bp->timer, jiffies + bp->current_interval);
5158 atomic_set(&bp->intr_sem, 0);
5160 bnx2_enable_int(bp);
5162 if (bp->flags & USING_MSI_FLAG) {
5163 /* Test MSI to make sure it is working
5164 * If MSI test fails, go back to INTx mode
5165 */
5166 if (bnx2_test_intr(bp) != 0) {
5167 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5168 " using MSI, switching to INTx mode. Please"
5169 " report this failure to the PCI maintainer"
5170 " and include system chipset information.\n",
5173 bnx2_disable_int(bp);
5176 rc = bnx2_init_nic(bp);
5179 rc = bnx2_request_irq(bp);
5182 napi_disable(&bp->napi);
5185 del_timer_sync(&bp->timer);
5188 bnx2_enable_int(bp);
5191 if (bp->flags & USING_MSI_FLAG) {
5192 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5195 netif_start_queue(dev);
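/* Note the fallback sequence above: if the forced test interrupt never
 * arrives, MSI is torn down, the NIC is re-initialized from scratch,
 * and the IRQ is re-requested in INTx mode; some chipsets route MSI
 * writes incorrectly and would otherwise leave the device silently
 * dead.
 */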
5201 bnx2_reset_task(struct work_struct *work)
5203 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5205 if (!netif_running(bp->dev))
5208 bp->in_reset_task = 1;
5209 bnx2_netif_stop(bp);
5213 atomic_set(&bp->intr_sem, 1);
5214 bnx2_netif_start(bp);
5215 bp->in_reset_task = 0;
5219 bnx2_tx_timeout(struct net_device *dev)
5221 struct bnx2 *bp = netdev_priv(dev);
5223 /* This allows the netif to be shutdown gracefully before resetting */
5224 schedule_work(&bp->reset_task);
5228 /* Called with rtnl_lock */
5230 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5232 struct bnx2 *bp = netdev_priv(dev);
5234 bnx2_netif_stop(bp);
5237 bnx2_set_rx_mode(dev);
5239 bnx2_netif_start(bp);
5243 /* Called with netif_tx_lock.
5244 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5245 * netif_wake_queue().
5246 */
5248 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5250 struct bnx2 *bp = netdev_priv(dev);
5253 struct sw_bd *tx_buf;
5254 u32 len, vlan_tag_flags, last_frag, mss;
5255 u16 prod, ring_prod;
5258 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5259 netif_stop_queue(dev);
5260 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5263 return NETDEV_TX_BUSY;
5265 len = skb_headlen(skb);
5267 ring_prod = TX_RING_IDX(prod);
5270 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5271 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5274 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5275 vlan_tag_flags |=
5276 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5278 if ((mss = skb_shinfo(skb)->gso_size)) {
5279 u32 tcp_opt_len, ip_tcp_len;
5282 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5284 tcp_opt_len = tcp_optlen(skb);
5286 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5287 u32 tcp_off = skb_transport_offset(skb) -
5288 sizeof(struct ipv6hdr) - ETH_HLEN;
5290 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5291 TX_BD_FLAGS_SW_FLAGS;
5292 if (likely(tcp_off == 0))
5293 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5296 vlan_tag_flags |= ((tcp_off & 0x3) <<
5297 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5298 ((tcp_off & 0x10) <<
5299 TX_BD_FLAGS_TCP6_OFF4_SHL);
5300 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5303 if (skb_header_cloned(skb) &&
5304 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5306 return NETDEV_TX_OK;
5309 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5313 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5314 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5318 if (tcp_opt_len || (iph->ihl > 5)) {
5319 vlan_tag_flags |= ((iph->ihl - 5) +
5320 (tcp_opt_len >> 2)) << 8;
5326 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5328 tx_buf = &bp->tx_buf_ring[ring_prod];
5330 pci_unmap_addr_set(tx_buf, mapping, mapping);
5332 txbd = &bp->tx_desc_ring[ring_prod];
5334 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5335 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5336 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5337 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5339 last_frag = skb_shinfo(skb)->nr_frags;
5341 for (i = 0; i < last_frag; i++) {
5342 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5344 prod = NEXT_TX_BD(prod);
5345 ring_prod = TX_RING_IDX(prod);
5346 txbd = &bp->tx_desc_ring[ring_prod];
5348 len = frag->size;
5349 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5350 len, PCI_DMA_TODEVICE);
5351 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5352 mapping, mapping);
5354 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5355 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5356 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5357 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5360 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5362 prod = NEXT_TX_BD(prod);
5363 bp->tx_prod_bseq += skb->len;
5365 REG_WR16(bp, bp->tx_bidx_addr, prod);
5366 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5371 dev->trans_start = jiffies;
5373 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5374 netif_stop_queue(dev);
5375 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5376 netif_wake_queue(dev);
5379 return NETDEV_TX_OK;
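/* The stop-then-recheck above closes a race with bnx2_tx_int():
 * descriptors may be freed between the availability check and
 * netif_stop_queue(), so the queue is woken right back up if more than
 * tx_wake_thresh descriptors became free in the meantime.
 */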
5382 /* Called with rtnl_lock */
5384 bnx2_close(struct net_device *dev)
5386 struct bnx2 *bp = netdev_priv(dev);
5389 /* Calling flush_scheduled_work() may deadlock because
5390 * linkwatch_event() may be on the workqueue and it will try to get
5391 * the rtnl_lock which we are holding.
5392 */
5393 while (bp->in_reset_task)
5394 msleep(1);
5396 bnx2_disable_int_sync(bp);
5397 napi_disable(&bp->napi);
5398 del_timer_sync(&bp->timer);
5399 if (bp->flags & NO_WOL_FLAG)
5400 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5402 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5404 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5405 bnx2_reset_chip(bp, reset_code);
5410 netif_carrier_off(bp->dev);
5411 bnx2_set_power_state(bp, PCI_D3hot);
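/* The reset code tells the bootcode why the driver is going down:
 * link-down unload when WOL is impossible, suspend-with-WOL when the
 * user enabled it, and plain suspend otherwise.  bnx2_suspend() makes
 * the same selection.
 */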
5415 #define GET_NET_STATS64(ctr) \
5416 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5417 (unsigned long) (ctr##_lo)
5419 #define GET_NET_STATS32(ctr) \
5420 (ctr##_lo)
5422 #if (BITS_PER_LONG == 64)
5423 #define GET_NET_STATS GET_NET_STATS64
5424 #else
5425 #define GET_NET_STATS GET_NET_STATS32
5426 #endif
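/* Example: on a 64-bit kernel,
 *   GET_NET_STATS(stats_blk->stat_IfHCInOctets)
 * expands to
 *   (unsigned long) ((unsigned long) (stats_blk->stat_IfHCInOctets_hi) << 32) +
 *   (unsigned long) (stats_blk->stat_IfHCInOctets_lo)
 * whereas 32-bit kernels keep only the _lo word, so these counters
 * wrap at 2^32 there.
 */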
5428 static struct net_device_stats *
5429 bnx2_get_stats(struct net_device *dev)
5431 struct bnx2 *bp = netdev_priv(dev);
5432 struct statistics_block *stats_blk = bp->stats_blk;
5433 struct net_device_stats *net_stats = &bp->net_stats;
5435 if (bp->stats_blk == NULL) {
5438 net_stats->rx_packets =
5439 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5440 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5441 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5443 net_stats->tx_packets =
5444 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5445 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5446 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5448 net_stats->rx_bytes =
5449 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5451 net_stats->tx_bytes =
5452 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5454 net_stats->multicast =
5455 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5457 net_stats->collisions =
5458 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5460 net_stats->rx_length_errors =
5461 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5462 stats_blk->stat_EtherStatsOverrsizePkts);
5464 net_stats->rx_over_errors =
5465 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5467 net_stats->rx_frame_errors =
5468 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5470 net_stats->rx_crc_errors =
5471 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5473 net_stats->rx_errors = net_stats->rx_length_errors +
5474 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5475 net_stats->rx_crc_errors;
5477 net_stats->tx_aborted_errors =
5478 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5479 stats_blk->stat_Dot3StatsLateCollisions);
5481 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5482 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5483 net_stats->tx_carrier_errors = 0;
5485 net_stats->tx_carrier_errors =
5487 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5490 net_stats->tx_errors =
5491 (unsigned long)
5492 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5493 +
5494 net_stats->tx_aborted_errors +
5495 net_stats->tx_carrier_errors;
5497 net_stats->rx_missed_errors =
5498 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5499 stats_blk->stat_FwRxDrop);
5504 /* All ethtool functions called with rtnl_lock */
5507 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5509 struct bnx2 *bp = netdev_priv(dev);
5510 int support_serdes = 0, support_copper = 0;
5512 cmd->supported = SUPPORTED_Autoneg;
5513 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5516 } else if (bp->phy_port == PORT_FIBRE)
5521 if (support_serdes) {
5522 cmd->supported |= SUPPORTED_1000baseT_Full |
5524 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5525 cmd->supported |= SUPPORTED_2500baseX_Full;
5528 if (support_copper) {
5529 cmd->supported |= SUPPORTED_10baseT_Half |
5530 SUPPORTED_10baseT_Full |
5531 SUPPORTED_100baseT_Half |
5532 SUPPORTED_100baseT_Full |
5533 SUPPORTED_1000baseT_Full |
5538 spin_lock_bh(&bp->phy_lock);
5539 cmd->port = bp->phy_port;
5540 cmd->advertising = bp->advertising;
5542 if (bp->autoneg & AUTONEG_SPEED) {
5543 cmd->autoneg = AUTONEG_ENABLE;
5546 cmd->autoneg = AUTONEG_DISABLE;
5549 if (netif_carrier_ok(dev)) {
5550 cmd->speed = bp->line_speed;
5551 cmd->duplex = bp->duplex;
5557 spin_unlock_bh(&bp->phy_lock);
5559 cmd->transceiver = XCVR_INTERNAL;
5560 cmd->phy_address = bp->phy_addr;
5566 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5568 struct bnx2 *bp = netdev_priv(dev);
5569 u8 autoneg = bp->autoneg;
5570 u8 req_duplex = bp->req_duplex;
5571 u16 req_line_speed = bp->req_line_speed;
5572 u32 advertising = bp->advertising;
5575 spin_lock_bh(&bp->phy_lock);
5577 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5578 goto err_out_unlock;
5580 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5581 goto err_out_unlock;
5583 if (cmd->autoneg == AUTONEG_ENABLE) {
5584 autoneg |= AUTONEG_SPEED;
5586 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5588 /* allow advertising 1 speed */
5589 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5590 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5591 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5592 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5594 if (cmd->port == PORT_FIBRE)
5595 goto err_out_unlock;
5597 advertising = cmd->advertising;
5599 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5600 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5601 (cmd->port == PORT_TP))
5602 goto err_out_unlock;
5603 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5604 advertising = cmd->advertising;
5605 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5606 goto err_out_unlock;
5608 if (cmd->port == PORT_FIBRE)
5609 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5611 advertising = ETHTOOL_ALL_COPPER_SPEED;
5613 advertising |= ADVERTISED_Autoneg;
5616 if (cmd->port == PORT_FIBRE) {
5617 if ((cmd->speed != SPEED_1000 &&
5618 cmd->speed != SPEED_2500) ||
5619 (cmd->duplex != DUPLEX_FULL))
5620 goto err_out_unlock;
5622 if (cmd->speed == SPEED_2500 &&
5623 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5624 goto err_out_unlock;
5626 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5627 goto err_out_unlock;
5629 autoneg &= ~AUTONEG_SPEED;
5630 req_line_speed = cmd->speed;
5631 req_duplex = cmd->duplex;
5635 bp->autoneg = autoneg;
5636 bp->advertising = advertising;
5637 bp->req_line_speed = req_line_speed;
5638 bp->req_duplex = req_duplex;
5640 err = bnx2_setup_phy(bp, cmd->port);
5643 spin_unlock_bh(&bp->phy_lock);
5649 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5651 struct bnx2 *bp = netdev_priv(dev);
5653 strcpy(info->driver, DRV_MODULE_NAME);
5654 strcpy(info->version, DRV_MODULE_VERSION);
5655 strcpy(info->bus_info, pci_name(bp->pdev));
5656 strcpy(info->fw_version, bp->fw_version);
5659 #define BNX2_REGDUMP_LEN (32 * 1024)
5662 bnx2_get_regs_len(struct net_device *dev)
5664 return BNX2_REGDUMP_LEN;
5668 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5670 u32 *p = _p, i, offset;
5671 u8 *orig_p = _p;
5672 struct bnx2 *bp = netdev_priv(dev);
5673 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5674 0x0800, 0x0880, 0x0c00, 0x0c10,
5675 0x0c30, 0x0d08, 0x1000, 0x101c,
5676 0x1040, 0x1048, 0x1080, 0x10a4,
5677 0x1400, 0x1490, 0x1498, 0x14f0,
5678 0x1500, 0x155c, 0x1580, 0x15dc,
5679 0x1600, 0x1658, 0x1680, 0x16d8,
5680 0x1800, 0x1820, 0x1840, 0x1854,
5681 0x1880, 0x1894, 0x1900, 0x1984,
5682 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5683 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5684 0x2000, 0x2030, 0x23c0, 0x2400,
5685 0x2800, 0x2820, 0x2830, 0x2850,
5686 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5687 0x3c00, 0x3c94, 0x4000, 0x4010,
5688 0x4080, 0x4090, 0x43c0, 0x4458,
5689 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5690 0x4fc0, 0x5010, 0x53c0, 0x5444,
5691 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5692 0x5fc0, 0x6000, 0x6400, 0x6428,
5693 0x6800, 0x6848, 0x684c, 0x6860,
5694 0x6888, 0x6910, 0x8000 };
5698 memset(p, 0, BNX2_REGDUMP_LEN);
5700 if (!netif_running(bp->dev))
5704 offset = reg_boundaries[0];
5706 while (offset < BNX2_REGDUMP_LEN) {
5707 *p++ = REG_RD(bp, offset);
5708 offset += 4;
5709 if (offset == reg_boundaries[i + 1]) {
5710 offset = reg_boundaries[i + 2];
5711 p = (u32 *) (orig_p + offset);
5712 i += 2;
5718 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5720 struct bnx2 *bp = netdev_priv(dev);
5722 if (bp->flags & NO_WOL_FLAG) {
5727 wol->supported = WAKE_MAGIC;
5729 wol->wolopts = WAKE_MAGIC;
5733 memset(&wol->sopass, 0, sizeof(wol->sopass));
5737 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5739 struct bnx2 *bp = netdev_priv(dev);
5741 if (wol->wolopts & ~WAKE_MAGIC)
5744 if (wol->wolopts & WAKE_MAGIC) {
5745 if (bp->flags & NO_WOL_FLAG)
5757 bnx2_nway_reset(struct net_device *dev)
5759 struct bnx2 *bp = netdev_priv(dev);
5762 if (!(bp->autoneg & AUTONEG_SPEED)) {
5766 spin_lock_bh(&bp->phy_lock);
5768 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5771 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5772 spin_unlock_bh(&bp->phy_lock);
5776 /* Force a link down visible on the other side */
5777 if (bp->phy_flags & PHY_SERDES_FLAG) {
5778 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5779 spin_unlock_bh(&bp->phy_lock);
5783 spin_lock_bh(&bp->phy_lock);
5785 bp->current_interval = SERDES_AN_TIMEOUT;
5786 bp->serdes_an_pending = 1;
5787 mod_timer(&bp->timer, jiffies + bp->current_interval);
5790 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5791 bmcr &= ~BMCR_LOOPBACK;
5792 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5794 spin_unlock_bh(&bp->phy_lock);
5800 bnx2_get_eeprom_len(struct net_device *dev)
5802 struct bnx2 *bp = netdev_priv(dev);
5804 if (bp->flash_info == NULL)
5807 return (int) bp->flash_size;
5811 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5814 struct bnx2 *bp = netdev_priv(dev);
5817 /* parameters already validated in ethtool_get_eeprom */
5819 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5825 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5828 struct bnx2 *bp = netdev_priv(dev);
5831 /* parameters already validated in ethtool_set_eeprom */
5833 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5839 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5841 struct bnx2 *bp = netdev_priv(dev);
5843 memset(coal, 0, sizeof(struct ethtool_coalesce));
5845 coal->rx_coalesce_usecs = bp->rx_ticks;
5846 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5847 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5848 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5850 coal->tx_coalesce_usecs = bp->tx_ticks;
5851 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5852 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5853 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5855 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5861 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5863 struct bnx2 *bp = netdev_priv(dev);
5865 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5866 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5868 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5869 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5871 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5872 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5874 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5875 if (bp->rx_quick_cons_trip_int > 0xff)
5876 bp->rx_quick_cons_trip_int = 0xff;
5878 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5879 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5881 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5882 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5884 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5885 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5887 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5888 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5889 0xff;
5891 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5892 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5893 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5894 bp->stats_ticks = USEC_PER_SEC;
5896 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5897 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5898 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5900 if (netif_running(bp->dev)) {
5901 bnx2_netif_stop(bp);
5903 bnx2_netif_start(bp);
5910 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5912 struct bnx2 *bp = netdev_priv(dev);
5914 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5915 ering->rx_mini_max_pending = 0;
5916 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
5918 ering->rx_pending = bp->rx_ring_size;
5919 ering->rx_mini_pending = 0;
5920 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
5922 ering->tx_max_pending = MAX_TX_DESC_CNT;
5923 ering->tx_pending = bp->tx_ring_size;
5927 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
5929 if (netif_running(bp->dev)) {
5930 bnx2_netif_stop(bp);
5931 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5936 bnx2_set_rx_ring_size(bp, rx);
5937 bp->tx_ring_size = tx;
5939 if (netif_running(bp->dev)) {
5942 rc = bnx2_alloc_mem(bp);
5946 bnx2_netif_start(bp);
5952 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5954 struct bnx2 *bp = netdev_priv(dev);
5957 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5958 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5959 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5963 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
5968 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5970 struct bnx2 *bp = netdev_priv(dev);
5972 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5973 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5974 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5978 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5980 struct bnx2 *bp = netdev_priv(dev);
5982 bp->req_flow_ctrl = 0;
5983 if (epause->rx_pause)
5984 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5985 if (epause->tx_pause)
5986 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5988 if (epause->autoneg) {
5989 bp->autoneg |= AUTONEG_FLOW_CTRL;
5992 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5995 spin_lock_bh(&bp->phy_lock);
5997 bnx2_setup_phy(bp, bp->phy_port);
5999 spin_unlock_bh(&bp->phy_lock);
6005 bnx2_get_rx_csum(struct net_device *dev)
6007 struct bnx2 *bp = netdev_priv(dev);
6013 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6015 struct bnx2 *bp = netdev_priv(dev);
6022 bnx2_set_tso(struct net_device *dev, u32 data)
6024 struct bnx2 *bp = netdev_priv(dev);
6026 if (data) {
6027 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6028 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6029 dev->features |= NETIF_F_TSO6;
6030 } else
6031 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6032 NETIF_F_TSO_ECN);
6036 #define BNX2_NUM_STATS 46
6039 char string[ETH_GSTRING_LEN];
6040 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6042 { "rx_error_bytes" },
6044 { "tx_error_bytes" },
6045 { "rx_ucast_packets" },
6046 { "rx_mcast_packets" },
6047 { "rx_bcast_packets" },
6048 { "tx_ucast_packets" },
6049 { "tx_mcast_packets" },
6050 { "tx_bcast_packets" },
6051 { "tx_mac_errors" },
6052 { "tx_carrier_errors" },
6053 { "rx_crc_errors" },
6054 { "rx_align_errors" },
6055 { "tx_single_collisions" },
6056 { "tx_multi_collisions" },
6058 { "tx_excess_collisions" },
6059 { "tx_late_collisions" },
6060 { "tx_total_collisions" },
6063 { "rx_undersize_packets" },
6064 { "rx_oversize_packets" },
6065 { "rx_64_byte_packets" },
6066 { "rx_65_to_127_byte_packets" },
6067 { "rx_128_to_255_byte_packets" },
6068 { "rx_256_to_511_byte_packets" },
6069 { "rx_512_to_1023_byte_packets" },
6070 { "rx_1024_to_1522_byte_packets" },
6071 { "rx_1523_to_9022_byte_packets" },
6072 { "tx_64_byte_packets" },
6073 { "tx_65_to_127_byte_packets" },
6074 { "tx_128_to_255_byte_packets" },
6075 { "tx_256_to_511_byte_packets" },
6076 { "tx_512_to_1023_byte_packets" },
6077 { "tx_1024_to_1522_byte_packets" },
6078 { "tx_1523_to_9022_byte_packets" },
6079 { "rx_xon_frames" },
6080 { "rx_xoff_frames" },
6081 { "tx_xon_frames" },
6082 { "tx_xoff_frames" },
6083 { "rx_mac_ctrl_frames" },
6084 { "rx_filtered_packets" },
6086 { "rx_fw_discards" },
6089 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
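/* The statistics block is treated as an array of 32-bit words, so the
 * offsets below are in units of u32.  64-bit counters are listed by
 * their _hi word; bnx2_get_ethtool_stats() fetches the matching _lo
 * word from the next array slot.
 */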
6091 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6092 STATS_OFFSET32(stat_IfHCInOctets_hi),
6093 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6094 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6095 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6096 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6097 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6098 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6099 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6100 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6101 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6102 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6103 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6104 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6105 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6106 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6107 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6108 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6109 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6110 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6111 STATS_OFFSET32(stat_EtherStatsCollisions),
6112 STATS_OFFSET32(stat_EtherStatsFragments),
6113 STATS_OFFSET32(stat_EtherStatsJabbers),
6114 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6115 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6116 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6117 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6118 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6119 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6120 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6121 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6122 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6123 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6124 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6125 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6126 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6127 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6128 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6129 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6130 STATS_OFFSET32(stat_XonPauseFramesReceived),
6131 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6132 STATS_OFFSET32(stat_OutXonSent),
6133 STATS_OFFSET32(stat_OutXoffSent),
6134 STATS_OFFSET32(stat_MacControlFramesReceived),
6135 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6136 STATS_OFFSET32(stat_IfInMBUFDiscards),
6137 STATS_OFFSET32(stat_FwRxDrop),
6140 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6141 * skipped because of errata.
6142 */
6143 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6144 8,0,8,8,8,8,8,8,8,8,
6145 4,0,4,4,4,4,4,4,4,4,
6146 4,4,4,4,4,4,4,4,4,4,
6147 4,4,4,4,4,4,4,4,4,4,
6148 4,4,4,4,4,4,
6149 };
6151 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6152 8,0,8,8,8,8,8,8,8,8,
6153 4,4,4,4,4,4,4,4,4,4,
6154 4,4,4,4,4,4,4,4,4,4,
6155 4,4,4,4,4,4,4,4,4,4,
6156 4,4,4,4,4,4,
6157 };
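/* Width tables for the counters above: 8 means two consecutive 32-bit
 * words forming one 64-bit counter, 4 a single word, and 0 a counter
 * skipped because of the errata noted above.
 */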
6159 #define BNX2_NUM_TESTS 6
6162 char string[ETH_GSTRING_LEN];
6163 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6164 { "register_test (offline)" },
6165 { "memory_test (offline)" },
6166 { "loopback_test (offline)" },
6167 { "nvram_test (online)" },
6168 { "interrupt_test (online)" },
6169 { "link_test (online)" },
6173 bnx2_get_sset_count(struct net_device *dev, int sset)
6177 return BNX2_NUM_TESTS;
6179 return BNX2_NUM_STATS;
6186 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6188 struct bnx2 *bp = netdev_priv(dev);
6190 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6191 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6194 bnx2_netif_stop(bp);
6195 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6198 if (bnx2_test_registers(bp) != 0) {
6200 etest->flags |= ETH_TEST_FL_FAILED;
6202 if (bnx2_test_memory(bp) != 0) {
6204 etest->flags |= ETH_TEST_FL_FAILED;
6206 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6207 etest->flags |= ETH_TEST_FL_FAILED;
6209 if (!netif_running(bp->dev)) {
6210 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6214 bnx2_netif_start(bp);
6217 /* wait for link up */
6218 for (i = 0; i < 7; i++) {
6221 msleep_interruptible(1000);
6225 if (bnx2_test_nvram(bp) != 0) {
6227 etest->flags |= ETH_TEST_FL_FAILED;
6229 if (bnx2_test_intr(bp) != 0) {
6231 etest->flags |= ETH_TEST_FL_FAILED;
6234 if (bnx2_test_link(bp) != 0) {
6236 etest->flags |= ETH_TEST_FL_FAILED;
6242 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6244 switch (stringset) {
6246 memcpy(buf, bnx2_stats_str_arr,
6247 sizeof(bnx2_stats_str_arr));
6250 memcpy(buf, bnx2_tests_str_arr,
6251 sizeof(bnx2_tests_str_arr));
6257 bnx2_get_ethtool_stats(struct net_device *dev,
6258 struct ethtool_stats *stats, u64 *buf)
6260 struct bnx2 *bp = netdev_priv(dev);
6262 u32 *hw_stats = (u32 *) bp->stats_blk;
6263 u8 *stats_len_arr = NULL;
6265 if (hw_stats == NULL) {
6266 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6270 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6271 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6272 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6273 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6274 stats_len_arr = bnx2_5706_stats_len_arr;
6276 stats_len_arr = bnx2_5708_stats_len_arr;
6278 for (i = 0; i < BNX2_NUM_STATS; i++) {
6279 if (stats_len_arr[i] == 0) {
6280 /* skip this counter */
6284 if (stats_len_arr[i] == 4) {
6285 /* 4-byte counter */
6287 *(hw_stats + bnx2_stats_offset_arr[i]);
6290 /* 8-byte counter */
6291 buf[i] = (((u64) *(hw_stats +
6292 bnx2_stats_offset_arr[i])) << 32) +
6293 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6298 bnx2_phys_id(struct net_device *dev, u32 data)
6300 struct bnx2 *bp = netdev_priv(dev);
6307 save = REG_RD(bp, BNX2_MISC_CFG);
6308 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6310 for (i = 0; i < (data * 2); i++) {
6312 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6315 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6316 BNX2_EMAC_LED_1000MB_OVERRIDE |
6317 BNX2_EMAC_LED_100MB_OVERRIDE |
6318 BNX2_EMAC_LED_10MB_OVERRIDE |
6319 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6320 BNX2_EMAC_LED_TRAFFIC);
6322 msleep_interruptible(500);
6323 if (signal_pending(current))
6326 REG_WR(bp, BNX2_EMAC_LED, 0);
6327 REG_WR(bp, BNX2_MISC_CFG, save);
6332 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6334 struct bnx2 *bp = netdev_priv(dev);
6336 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6337 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6339 return (ethtool_op_set_tx_csum(dev, data));
6342 static const struct ethtool_ops bnx2_ethtool_ops = {
6343 .get_settings = bnx2_get_settings,
6344 .set_settings = bnx2_set_settings,
6345 .get_drvinfo = bnx2_get_drvinfo,
6346 .get_regs_len = bnx2_get_regs_len,
6347 .get_regs = bnx2_get_regs,
6348 .get_wol = bnx2_get_wol,
6349 .set_wol = bnx2_set_wol,
6350 .nway_reset = bnx2_nway_reset,
6351 .get_link = ethtool_op_get_link,
6352 .get_eeprom_len = bnx2_get_eeprom_len,
6353 .get_eeprom = bnx2_get_eeprom,
6354 .set_eeprom = bnx2_set_eeprom,
6355 .get_coalesce = bnx2_get_coalesce,
6356 .set_coalesce = bnx2_set_coalesce,
6357 .get_ringparam = bnx2_get_ringparam,
6358 .set_ringparam = bnx2_set_ringparam,
6359 .get_pauseparam = bnx2_get_pauseparam,
6360 .set_pauseparam = bnx2_set_pauseparam,
6361 .get_rx_csum = bnx2_get_rx_csum,
6362 .set_rx_csum = bnx2_set_rx_csum,
6363 .set_tx_csum = bnx2_set_tx_csum,
6364 .set_sg = ethtool_op_set_sg,
6365 .set_tso = bnx2_set_tso,
6366 .self_test = bnx2_self_test,
6367 .get_strings = bnx2_get_strings,
6368 .phys_id = bnx2_phys_id,
6369 .get_ethtool_stats = bnx2_get_ethtool_stats,
6370 .get_sset_count = bnx2_get_sset_count,
6373 /* Called with rtnl_lock */
6375 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6377 struct mii_ioctl_data *data = if_mii(ifr);
6378 struct bnx2 *bp = netdev_priv(dev);
6383 data->phy_id = bp->phy_addr;
6389 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6392 if (!netif_running(dev))
6395 spin_lock_bh(&bp->phy_lock);
6396 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6397 spin_unlock_bh(&bp->phy_lock);
6399 data->val_out = mii_regval;
6405 if (!capable(CAP_NET_ADMIN))
6408 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6411 if (!netif_running(dev))
6414 spin_lock_bh(&bp->phy_lock);
6415 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6416 spin_unlock_bh(&bp->phy_lock);
6427 /* Called with rtnl_lock */
6429 bnx2_change_mac_addr(struct net_device *dev, void *p)
6431 struct sockaddr *addr = p;
6432 struct bnx2 *bp = netdev_priv(dev);
6434 if (!is_valid_ether_addr(addr->sa_data))
6437 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6438 if (netif_running(dev))
6439 bnx2_set_mac_addr(bp);
6444 /* Called with rtnl_lock */
6446 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6448 struct bnx2 *bp = netdev_priv(dev);
6450 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6451 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6455 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6458 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6460 poll_bnx2(struct net_device *dev)
6462 struct bnx2 *bp = netdev_priv(dev);
6464 disable_irq(bp->pdev->irq);
6465 bnx2_interrupt(bp->pdev->irq, dev);
6466 enable_irq(bp->pdev->irq);
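/* Netpoll (e.g. netconsole) path: with the IRQ line masked, invoke the
 * interrupt handler synchronously so packets can still be drained when
 * normal interrupt delivery is unavailable.
 */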
6470 static void __devinit
6471 bnx2_get_5709_media(struct bnx2 *bp)
6473 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6474 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6477 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6479 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6480 bp->phy_flags |= PHY_SERDES_FLAG;
6484 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6485 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6487 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6489 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6494 bp->phy_flags |= PHY_SERDES_FLAG;
6502 bp->phy_flags |= PHY_SERDES_FLAG;
6508 static void __devinit
6509 bnx2_get_pci_speed(struct bnx2 *bp)
6513 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6514 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6517 bp->flags |= PCIX_FLAG;
6519 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6521 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6523 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6524 bp->bus_speed_mhz = 133;
6527 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6528 bp->bus_speed_mhz = 100;
6531 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6532 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6533 bp->bus_speed_mhz = 66;
6536 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6537 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6538 bp->bus_speed_mhz = 50;
6541 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6542 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6543 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6544 bp->bus_speed_mhz = 33;
6549 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6550 bp->bus_speed_mhz = 66;
6552 bp->bus_speed_mhz = 33;
6555 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6556 bp->flags |= PCI_32BIT_FLAG;
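/* One-time probe setup: map BAR 0, enable bus mastering, read the chip
 * ID, choose a DMA mask (the 5708 is limited to 40-bit addressing),
 * locate the shared-memory window published by the bootcode, and pull
 * the permanent MAC address and default settings from it.
 */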
6560 static int __devinit
6561 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6564 unsigned long mem_len;
6567 u64 dma_mask, persist_dma_mask;
6569 SET_NETDEV_DEV(dev, &pdev->dev);
6570 bp = netdev_priv(dev);
6575 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6576 rc = pci_enable_device(pdev);
6578 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6582 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6584 "Cannot find PCI device base address, aborting.\n");
6586 goto err_out_disable;
6589 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6591 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6592 goto err_out_disable;
6595 pci_set_master(pdev);
6597 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6598 if (bp->pm_cap == 0) {
6600 "Cannot find power management capability, aborting.\n");
6602 goto err_out_release;
6608 spin_lock_init(&bp->phy_lock);
6609 spin_lock_init(&bp->indirect_lock);
6610 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6612 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6613 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6614 dev->mem_end = dev->mem_start + mem_len;
6615 dev->irq = pdev->irq;
6617 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6620 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6622 goto err_out_release;
6625 /* Configure byte swap and enable write to the reg_window registers.
6626 * Rely on CPU to do target byte swapping on big endian systems
6627 * The chip's target access swapping will not swap all accesses
6628 */
6629 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6630 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6631 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6633 bnx2_set_power_state(bp, PCI_D0);
6635 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6637 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6638 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6640 "Cannot find PCIE capability, aborting.\n");
6644 bp->flags |= PCIE_FLAG;
6646 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6647 if (bp->pcix_cap == 0) {
6649 "Cannot find PCIX capability, aborting.\n");
6655 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6656 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6657 bp->flags |= MSI_CAP_FLAG;
6660 /* 5708 cannot support DMA addresses > 40-bit. */
6661 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6662 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6664 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6666 /* Configure DMA attributes. */
6667 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6668 dev->features |= NETIF_F_HIGHDMA;
6669 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6672 "pci_set_consistent_dma_mask failed, aborting.\n");
6675 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6676 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6680 if (!(bp->flags & PCIE_FLAG))
6681 bnx2_get_pci_speed(bp);
6683 /* 5706A0 may falsely detect SERR and PERR. */
6684 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6685 reg = REG_RD(bp, PCI_COMMAND);
6686 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6687 REG_WR(bp, PCI_COMMAND, reg);
6689 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6690 !(bp->flags & PCIX_FLAG)) {
6693 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6697 bnx2_init_nvram(bp);
6699 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6701 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6702 BNX2_SHM_HDR_SIGNATURE_SIG) {
6703 u32 off = PCI_FUNC(pdev->devfn) << 2;
6705 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6707 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6709 /* Get the permanent MAC address. First we need to make sure the
6710 * firmware is actually running.
6711 */
6712 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6714 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6715 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6716 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6721 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6722 for (i = 0, j = 0; i < 3; i++) {
6725 num = (u8) (reg >> (24 - (i * 8)));
6726 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6727 if (num >= k || !skip0 || k == 1) {
6728 bp->fw_version[j++] = (num / k) + '0';
6733 bp->fw_version[j++] = '.';
6735 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6736 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6739 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6740 bp->flags |= ASF_ENABLE_FLAG;
6742 for (i = 0; i < 30; i++) {
6743 reg = REG_RD_IND(bp, bp->shmem_base +
6744 BNX2_BC_STATE_CONDITION);
6745 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6750 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6751 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6752 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6753 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6755 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6757 bp->fw_version[j++] = ' ';
6758 for (i = 0; i < 3; i++) {
6759 reg = REG_RD_IND(bp, addr + i * 4);
6761 memcpy(&bp->fw_version[j], &reg, 4);
6766 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6767 bp->mac_addr[0] = (u8) (reg >> 8);
6768 bp->mac_addr[1] = (u8) reg;
6770 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6771 bp->mac_addr[2] = (u8) (reg >> 24);
6772 bp->mac_addr[3] = (u8) (reg >> 16);
6773 bp->mac_addr[4] = (u8) (reg >> 8);
6774 bp->mac_addr[5] = (u8) reg;
6776 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6778 bp->tx_ring_size = MAX_TX_DESC_CNT;
6779 bnx2_set_rx_ring_size(bp, 255);
6783 bp->tx_quick_cons_trip_int = 20;
6784 bp->tx_quick_cons_trip = 20;
6785 bp->tx_ticks_int = 80;
6786 bp->tx_ticks = 80;
6788 bp->rx_quick_cons_trip_int = 6;
6789 bp->rx_quick_cons_trip = 6;
6790 bp->rx_ticks_int = 18;
6791 bp->rx_ticks = 18;
6793 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6795 bp->timer_interval = HZ;
6796 bp->current_interval = HZ;
6800 /* Disable WOL support if we are running on a SERDES chip. */
6801 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6802 bnx2_get_5709_media(bp);
6803 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6804 bp->phy_flags |= PHY_SERDES_FLAG;
6806 bp->phy_port = PORT_TP;
6807 if (bp->phy_flags & PHY_SERDES_FLAG) {
6808 bp->phy_port = PORT_FIBRE;
6809 reg = REG_RD_IND(bp, bp->shmem_base +
6810 BNX2_SHARED_HW_CFG_CONFIG);
6811 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6812 bp->flags |= NO_WOL_FLAG;
6815 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6817 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6818 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6820 bnx2_init_remote_phy(bp);
6822 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6823 CHIP_NUM(bp) == CHIP_NUM_5708)
6824 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6825 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6826 (CHIP_REV(bp) == CHIP_REV_Ax ||
6827 CHIP_REV(bp) == CHIP_REV_Bx))
6828 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6830 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6831 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6832 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6833 bp->flags |= NO_WOL_FLAG;
6837 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6838 bp->tx_quick_cons_trip_int =
6839 bp->tx_quick_cons_trip;
6840 bp->tx_ticks_int = bp->tx_ticks;
6841 bp->rx_quick_cons_trip_int =
6842 bp->rx_quick_cons_trip;
6843 bp->rx_ticks_int = bp->rx_ticks;
6844 bp->comp_prod_trip_int = bp->comp_prod_trip;
6845 bp->com_ticks_int = bp->com_ticks;
6846 bp->cmd_ticks_int = bp->cmd_ticks;
6849 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6851 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6852 * with byte enables disabled on the unused 32-bit word. This is legal
6853 * but causes problems on the AMD 8132 which will eventually stop
6854 * responding after a while.
6856 * AMD believes this incompatibility is unique to the 5706, and
6857 * prefers to locally disable MSI rather than globally disabling it.
6858 */
6859 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6860 struct pci_dev *amd_8132 = NULL;
6862 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6863 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6866 if (amd_8132->revision >= 0x10 &&
6867 amd_8132->revision <= 0x13) {
6868 disable_msi = 1;
6869 pci_dev_put(amd_8132);
6875 bnx2_set_default_link(bp);
6876 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6878 init_timer(&bp->timer);
6879 bp->timer.expires = RUN_AT(bp->timer_interval);
6880 bp->timer.data = (unsigned long) bp;
6881 bp->timer.function = bnx2_timer;
6887 iounmap(bp->regview);
6892 pci_release_regions(pdev);
6895 pci_disable_device(pdev);
6896 pci_set_drvdata(pdev, NULL);
6902 static char * __devinit
6903 bnx2_bus_string(struct bnx2 *bp, char *str)
6907 if (bp->flags & PCIE_FLAG) {
6908 s += sprintf(s, "PCI Express");
6910 s += sprintf(s, "PCI");
6911 if (bp->flags & PCIX_FLAG)
6912 s += sprintf(s, "-X");
6913 if (bp->flags & PCI_32BIT_FLAG)
6914 s += sprintf(s, " 32-bit");
6916 s += sprintf(s, " 64-bit");
6917 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
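/* Produces banner strings such as "PCI Express" or "PCI-X 64-bit
 * 133MHz" for the probe message in bnx2_init_one(); the caller's str
 * buffer is assumed large enough for the longest variant.
 */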
6922 static int __devinit
6923 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6925 static int version_printed = 0;
6926 struct net_device *dev = NULL;
6930 DECLARE_MAC_BUF(mac);
6932 if (version_printed++ == 0)
6933 printk(KERN_INFO "%s", version);
6935 /* dev zeroed in init_etherdev */
6936 dev = alloc_etherdev(sizeof(*bp));
6941 rc = bnx2_init_board(pdev, dev);
6947 dev->open = bnx2_open;
6948 dev->hard_start_xmit = bnx2_start_xmit;
6949 dev->stop = bnx2_close;
6950 dev->get_stats = bnx2_get_stats;
6951 dev->set_multicast_list = bnx2_set_rx_mode;
6952 dev->do_ioctl = bnx2_ioctl;
6953 dev->set_mac_address = bnx2_change_mac_addr;
6954 dev->change_mtu = bnx2_change_mtu;
6955 dev->tx_timeout = bnx2_tx_timeout;
6956 dev->watchdog_timeo = TX_TIMEOUT;
6958 dev->vlan_rx_register = bnx2_vlan_rx_register;
6960 dev->ethtool_ops = &bnx2_ethtool_ops;
6962 bp = netdev_priv(dev);
6963 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6965 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6966 dev->poll_controller = poll_bnx2;
6969 pci_set_drvdata(pdev, dev);
6971 memcpy(dev->dev_addr, bp->mac_addr, 6);
6972 memcpy(dev->perm_addr, bp->mac_addr, 6);
6973 bp->name = board_info[ent->driver_data].name;
6975 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6976 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6977 dev->features |= NETIF_F_IPV6_CSUM;
6980 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6982 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6984 dev->features |= NETIF_F_TSO6;
6986 if ((rc = register_netdev(dev))) {
6987 dev_err(&pdev->dev, "Cannot register net device\n");
6989 iounmap(bp->regview);
6990 pci_release_regions(pdev);
6991 pci_disable_device(pdev);
6992 pci_set_drvdata(pdev, NULL);
6997 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6998 "IRQ %d, node addr %s\n",
7001 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7002 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7003 bnx2_bus_string(bp, str),
7005 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7010 static void __devexit
7011 bnx2_remove_one(struct pci_dev *pdev)
7013 struct net_device *dev = pci_get_drvdata(pdev);
7014 struct bnx2 *bp = netdev_priv(dev);
7016 flush_scheduled_work();
7018 unregister_netdev(dev);
7021 iounmap(bp->regview);
7024 pci_release_regions(pdev);
7025 pci_disable_device(pdev);
7026 pci_set_drvdata(pdev, NULL);
7030 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7032 struct net_device *dev = pci_get_drvdata(pdev);
7033 struct bnx2 *bp = netdev_priv(dev);
7036 /* PCI register 4 needs to be saved whether netif_running() or not.
7037 * MSI address and data need to be saved if using MSI and
7038 * netif_running().
7039 */
7040 pci_save_state(pdev);
7041 if (!netif_running(dev))
7044 flush_scheduled_work();
7045 bnx2_netif_stop(bp);
7046 netif_device_detach(dev);
7047 del_timer_sync(&bp->timer);
7048 if (bp->flags & NO_WOL_FLAG)
7049 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7051 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7053 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7054 bnx2_reset_chip(bp, reset_code);
7056 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7061 bnx2_resume(struct pci_dev *pdev)
7063 struct net_device *dev = pci_get_drvdata(pdev);
7064 struct bnx2 *bp = netdev_priv(dev);
7066 pci_restore_state(pdev);
7067 if (!netif_running(dev))
7070 bnx2_set_power_state(bp, PCI_D0);
7071 netif_device_attach(dev);
7073 bnx2_netif_start(bp);
7077 static struct pci_driver bnx2_pci_driver = {
7078 .name = DRV_MODULE_NAME,
7079 .id_table = bnx2_pci_tbl,
7080 .probe = bnx2_init_one,
7081 .remove = __devexit_p(bnx2_remove_one),
7082 .suspend = bnx2_suspend,
7083 .resume = bnx2_resume,
7086 static int __init bnx2_init(void)
7088 return pci_register_driver(&bnx2_pci_driver);
7091 static void __exit bnx2_cleanup(void)
7093 pci_unregister_driver(&bnx2_pci_driver);
7096 module_init(bnx2_init);
7097 module_exit(bnx2_cleanup);