1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.5.11"
58 #define DRV_MODULE_RELDATE "June 4, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int disable_msi = 0;
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
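/* Usage sketch: the parameter is set at module load time, e.g.
 * "modprobe bnx2 disable_msi=1"; with permission bits of 0 it is not
 * exposed under /sys/module/bnx2/parameters/. */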
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
127 static struct flash_spec flash_table[] =
130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202	/* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
231 return (bp->tx_ring_size - diff);
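/* Worked example (assuming u16 ring indices, TX_DESC_CNT = 256 and
 * MAX_TX_DESC_CNT = 255): with tx_prod == tx_cons, diff is 0 and the
 * whole tx_ring_size is reported free; once the producer is a full
 * page ahead, diff == TX_DESC_CNT and is clamped to MAX_TX_DESC_CNT,
 * accounting for the one descriptor per page used as the next-page
 * pointer. */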
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
239 spin_lock_bh(&bp->indirect_lock);
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
247 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
249 spin_lock_bh(&bp->indirect_lock);
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
252 spin_unlock_bh(&bp->indirect_lock);
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
259 spin_lock_bh(&bp->indirect_lock);
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
277 spin_unlock_bh(&bp->indirect_lock);
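/* On the 5709, a context write is a request/ack handshake: the data
 * register is loaded first, then the control register is written with
 * WRITE_REQ set, and the bit is polled until the hardware clears it.
 * Older chips take a direct address/data register pair, as in the
 * else branch above. */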
281 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
301 for (i = 0; i < 50; i++) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
358 for (i = 0; i < 50; i++) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
387 bnx2_disable_int(struct bnx2 *bp)
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
395 bnx2_enable_int(struct bnx2 *bp)
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
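/* Sequence above (hedged reading): the first write acks events up to
 * last_status_idx while the interrupt stays masked, the second write
 * unmasks it, and COAL_NOW forces an immediate host-coalescing pass so
 * that work which arrived while masked still raises an interrupt. */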
408 bnx2_disable_int_sync(struct bnx2 *bp)
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
416 bnx2_netif_stop(struct bnx2 *bp)
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
427 bnx2_netif_start(struct bnx2 *bp)
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
439 bnx2_free_mem(struct bnx2 *bp)
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
451 if (bp->status_blk) {
452 pci_free_consistent(bp->pdev, bp->status_stats_size,
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
455 bp->stats_blk = NULL;
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
473 vfree(bp->rx_buf_ring);
474 bp->rx_buf_ring = NULL;
478 bnx2_alloc_mem(struct bnx2 *bp)
480 int i, status_blk_size;
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
484 if (bp->tx_buf_ring == NULL)
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
496 if (bp->rx_buf_ring == NULL)
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
522 memset(bp->status_blk, 0, bp->status_stats_size);
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
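/* Layout of the combined DMA allocation (sketch, from the code above):
 *
 *   status_blk_mapping --> struct status_block  (L1-cache aligned)
 *   stats_blk_mapping  --> struct statistics_block
 *
 * with stats_blk_mapping == status_blk_mapping + status_blk_size.
 */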
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
549 bnx2_report_fw_link(struct bnx2 *bp)
551 u32 fw_link_status = 0;
553 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
559 switch (bp->line_speed) {
561 if (bp->duplex == DUPLEX_HALF)
562 fw_link_status = BNX2_LINK_STATUS_10HALF;
564 fw_link_status = BNX2_LINK_STATUS_10FULL;
567 if (bp->duplex == DUPLEX_HALF)
568 fw_link_status = BNX2_LINK_STATUS_100HALF;
570 fw_link_status = BNX2_LINK_STATUS_100FULL;
573 if (bp->duplex == DUPLEX_HALF)
574 fw_link_status = BNX2_LINK_STATUS_1000HALF;
576 fw_link_status = BNX2_LINK_STATUS_1000FULL;
579 if (bp->duplex == DUPLEX_HALF)
580 fw_link_status = BNX2_LINK_STATUS_2500HALF;
582 fw_link_status = BNX2_LINK_STATUS_2500FULL;
586 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
589 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
591 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
592 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
594 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
595 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
596 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
598 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
602 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
604 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
608 bnx2_xceiver_str(struct bnx2 *bp)
610 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
616 bnx2_report_link(struct bnx2 *bp)
619 netif_carrier_on(bp->dev);
620 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
621 bnx2_xceiver_str(bp));
623 printk("%d Mbps ", bp->line_speed);
625 if (bp->duplex == DUPLEX_FULL)
626 printk("full duplex");
628 printk("half duplex");
631 if (bp->flow_ctrl & FLOW_CTRL_RX) {
632 printk(", receive ");
633 if (bp->flow_ctrl & FLOW_CTRL_TX)
634 printk("& transmit ");
637 printk(", transmit ");
639 printk("flow control ON");
644 netif_carrier_off(bp->dev);
645 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
646 bnx2_xceiver_str(bp));
649 bnx2_report_fw_link(bp);
653 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
655 u32 local_adv, remote_adv;
658 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
659 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
661 if (bp->duplex == DUPLEX_FULL) {
662 bp->flow_ctrl = bp->req_flow_ctrl;
667 if (bp->duplex != DUPLEX_FULL) {
671 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
672 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
675 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
676 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
677 bp->flow_ctrl |= FLOW_CTRL_TX;
678 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
679 bp->flow_ctrl |= FLOW_CTRL_RX;
683 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
684 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
686 if (bp->phy_flags & PHY_SERDES_FLAG) {
687 u32 new_local_adv = 0;
688 u32 new_remote_adv = 0;
690 if (local_adv & ADVERTISE_1000XPAUSE)
691 new_local_adv |= ADVERTISE_PAUSE_CAP;
692 if (local_adv & ADVERTISE_1000XPSE_ASYM)
693 new_local_adv |= ADVERTISE_PAUSE_ASYM;
694 if (remote_adv & ADVERTISE_1000XPAUSE)
695 new_remote_adv |= ADVERTISE_PAUSE_CAP;
696 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
697 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
699 local_adv = new_local_adv;
700 remote_adv = new_remote_adv;
703 /* See Table 28B-3 of 802.3ab-1999 spec. */
704 if (local_adv & ADVERTISE_PAUSE_CAP) {
705	if (local_adv & ADVERTISE_PAUSE_ASYM) {
706 if (remote_adv & ADVERTISE_PAUSE_CAP) {
707 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
709 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
710 bp->flow_ctrl = FLOW_CTRL_RX;
714 if (remote_adv & ADVERTISE_PAUSE_CAP) {
715 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
719 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
720 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
721 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
723 bp->flow_ctrl = FLOW_CTRL_TX;
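/* Condensed restatement of the branches above, following 802.3
 * Table 28B-3 (local advertisement / remote advertisement -> result):
 *
 *   Cap         & Cap          -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   Cap + Asym  & Asym only    -> FLOW_CTRL_RX
 *   Asym only   & Cap + Asym   -> FLOW_CTRL_TX
 *   otherwise                  -> no pause
 */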
729 bnx2_5709s_linkup(struct bnx2 *bp)
735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
736 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
737 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
739 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
740 bp->line_speed = bp->req_line_speed;
741 bp->duplex = bp->req_duplex;
744 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
746 case MII_BNX2_GP_TOP_AN_SPEED_10:
747 bp->line_speed = SPEED_10;
749 case MII_BNX2_GP_TOP_AN_SPEED_100:
750 bp->line_speed = SPEED_100;
752 case MII_BNX2_GP_TOP_AN_SPEED_1G:
753 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
754 bp->line_speed = SPEED_1000;
756 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
757 bp->line_speed = SPEED_2500;
760 if (val & MII_BNX2_GP_TOP_AN_FD)
761 bp->duplex = DUPLEX_FULL;
763 bp->duplex = DUPLEX_HALF;
768 bnx2_5708s_linkup(struct bnx2 *bp)
773 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
774 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
775 case BCM5708S_1000X_STAT1_SPEED_10:
776 bp->line_speed = SPEED_10;
778 case BCM5708S_1000X_STAT1_SPEED_100:
779 bp->line_speed = SPEED_100;
781 case BCM5708S_1000X_STAT1_SPEED_1G:
782 bp->line_speed = SPEED_1000;
784 case BCM5708S_1000X_STAT1_SPEED_2G5:
785 bp->line_speed = SPEED_2500;
788 if (val & BCM5708S_1000X_STAT1_FD)
789 bp->duplex = DUPLEX_FULL;
791 bp->duplex = DUPLEX_HALF;
797 bnx2_5706s_linkup(struct bnx2 *bp)
799 u32 bmcr, local_adv, remote_adv, common;
802 bp->line_speed = SPEED_1000;
804 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
805 if (bmcr & BMCR_FULLDPLX) {
806 bp->duplex = DUPLEX_FULL;
809 bp->duplex = DUPLEX_HALF;
812 if (!(bmcr & BMCR_ANENABLE)) {
816 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
817 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
819 common = local_adv & remote_adv;
820 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
822 if (common & ADVERTISE_1000XFULL) {
823 bp->duplex = DUPLEX_FULL;
826 bp->duplex = DUPLEX_HALF;
834 bnx2_copper_linkup(struct bnx2 *bp)
838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839 if (bmcr & BMCR_ANENABLE) {
840 u32 local_adv, remote_adv, common;
842 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
843 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
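/* The link partner's GbE ability bits in MII_STAT1000 sit two bit
 * positions above the matching MII_CTRL1000 advertisement bits
 * (e.g. LPA_1000FULL is 0x0800 vs. ADVERTISE_1000FULL 0x0200), hence
 * the shift before masking below. */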
845 common = local_adv & (remote_adv >> 2);
846 if (common & ADVERTISE_1000FULL) {
847 bp->line_speed = SPEED_1000;
848 bp->duplex = DUPLEX_FULL;
850 else if (common & ADVERTISE_1000HALF) {
851 bp->line_speed = SPEED_1000;
852 bp->duplex = DUPLEX_HALF;
855 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
856 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
858 common = local_adv & remote_adv;
859 if (common & ADVERTISE_100FULL) {
860 bp->line_speed = SPEED_100;
861 bp->duplex = DUPLEX_FULL;
863 else if (common & ADVERTISE_100HALF) {
864 bp->line_speed = SPEED_100;
865 bp->duplex = DUPLEX_HALF;
867 else if (common & ADVERTISE_10FULL) {
868 bp->line_speed = SPEED_10;
869 bp->duplex = DUPLEX_FULL;
871 else if (common & ADVERTISE_10HALF) {
872 bp->line_speed = SPEED_10;
873 bp->duplex = DUPLEX_HALF;
882 if (bmcr & BMCR_SPEED100) {
883 bp->line_speed = SPEED_100;
886 bp->line_speed = SPEED_10;
888 if (bmcr & BMCR_FULLDPLX) {
889 bp->duplex = DUPLEX_FULL;
892 bp->duplex = DUPLEX_HALF;
900 bnx2_set_mac_link(struct bnx2 *bp)
904 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
905 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
906 (bp->duplex == DUPLEX_HALF)) {
907 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
910 /* Configure the EMAC mode register. */
911 val = REG_RD(bp, BNX2_EMAC_MODE);
913 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
914 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
915 BNX2_EMAC_MODE_25G_MODE);
918 switch (bp->line_speed) {
920 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
921 val |= BNX2_EMAC_MODE_PORT_MII_10M;
926 val |= BNX2_EMAC_MODE_PORT_MII;
929 val |= BNX2_EMAC_MODE_25G_MODE;
932 val |= BNX2_EMAC_MODE_PORT_GMII;
937 val |= BNX2_EMAC_MODE_PORT_GMII;
940 /* Set the MAC to operate in the appropriate duplex mode. */
941 if (bp->duplex == DUPLEX_HALF)
942 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
943 REG_WR(bp, BNX2_EMAC_MODE, val);
945 /* Enable/disable rx PAUSE. */
946 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
948 if (bp->flow_ctrl & FLOW_CTRL_RX)
949 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
950 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
952 /* Enable/disable tx PAUSE. */
953 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
954 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
956 if (bp->flow_ctrl & FLOW_CTRL_TX)
957 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
958 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
960 /* Acknowledge the interrupt. */
961 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
967 bnx2_enable_bmsr1(struct bnx2 *bp)
969 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970 (CHIP_NUM(bp) == CHIP_NUM_5709))
971 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972 MII_BNX2_BLK_ADDR_GP_STATUS);
976 bnx2_disable_bmsr1(struct bnx2 *bp)
978 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979 (CHIP_NUM(bp) == CHIP_NUM_5709))
980 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
985 bnx2_test_and_enable_2g5(struct bnx2 *bp)
990 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
993 if (bp->autoneg & AUTONEG_SPEED)
994 bp->advertising |= ADVERTISED_2500baseX_Full;
996 if (CHIP_NUM(bp) == CHIP_NUM_5709)
997 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
999 bnx2_read_phy(bp, bp->mii_up1, &up1);
1000 if (!(up1 & BCM5708S_UP1_2G5)) {
1001 up1 |= BCM5708S_UP1_2G5;
1002 bnx2_write_phy(bp, bp->mii_up1, up1);
1006 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1007 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1008 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1014 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1019 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1022 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1025 bnx2_read_phy(bp, bp->mii_up1, &up1);
1026 if (up1 & BCM5708S_UP1_2G5) {
1027 up1 &= ~BCM5708S_UP1_2G5;
1028 bnx2_write_phy(bp, bp->mii_up1, up1);
1032 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1033 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1034 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1040 bnx2_enable_forced_2g5(struct bnx2 *bp)
1044 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1050 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051 MII_BNX2_BLK_ADDR_SERDES_DIG);
1052 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1061 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1062 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063 bmcr |= BCM5708S_BMCR_FORCE_2500;
1066 if (bp->autoneg & AUTONEG_SPEED) {
1067 bmcr &= ~BMCR_ANENABLE;
1068 if (bp->req_duplex == DUPLEX_FULL)
1069 bmcr |= BMCR_FULLDPLX;
1071 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1075 bnx2_disable_forced_2g5(struct bnx2 *bp)
1079 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1082 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1085 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086 MII_BNX2_BLK_ADDR_SERDES_DIG);
1087 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1100 if (bp->autoneg & AUTONEG_SPEED)
1101 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106 bnx2_set_link(struct bnx2 *bp)
1111 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1116 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1119 link_up = bp->link_up;
1121 bnx2_enable_bmsr1(bp);
1122 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1123 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1124 bnx2_disable_bmsr1(bp);
1126 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1127 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1130 val = REG_RD(bp, BNX2_EMAC_STATUS);
1131 if (val & BNX2_EMAC_STATUS_LINK)
1132 bmsr |= BMSR_LSTATUS;
1134 bmsr &= ~BMSR_LSTATUS;
1137 if (bmsr & BMSR_LSTATUS) {
1140 if (bp->phy_flags & PHY_SERDES_FLAG) {
1141 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1142 bnx2_5706s_linkup(bp);
1143 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1144 bnx2_5708s_linkup(bp);
1145 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1146 bnx2_5709s_linkup(bp);
1149 bnx2_copper_linkup(bp);
1151 bnx2_resolve_flow_ctrl(bp);
1154 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1155 (bp->autoneg & AUTONEG_SPEED))
1156 bnx2_disable_forced_2g5(bp);
1158 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1162 if (bp->link_up != link_up) {
1163 bnx2_report_link(bp);
1166 bnx2_set_mac_link(bp);
1172 bnx2_reset_phy(struct bnx2 *bp)
1177 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1179 #define PHY_RESET_MAX_WAIT 100
1180 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1183	bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1184 if (!(reg & BMCR_RESET)) {
1189 if (i == PHY_RESET_MAX_WAIT) {
1196 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1200 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE;
1207 adv = ADVERTISE_PAUSE_CAP;
1210 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212 adv = ADVERTISE_1000XPSE_ASYM;
1215 adv = ADVERTISE_PAUSE_ASYM;
1218 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1223 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1232 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1234 u32 speed_arg = 0, pause_adv;
1236 pause_adv = bnx2_phy_get_pause_adv(bp);
1238 if (bp->autoneg & AUTONEG_SPEED) {
1239 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240 if (bp->advertising & ADVERTISED_10baseT_Half)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242 if (bp->advertising & ADVERTISED_10baseT_Full)
1243 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244 if (bp->advertising & ADVERTISED_100baseT_Half)
1245 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246 if (bp->advertising & ADVERTISED_100baseT_Full)
1247 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1253 if (bp->req_line_speed == SPEED_2500)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255 else if (bp->req_line_speed == SPEED_1000)
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257 else if (bp->req_line_speed == SPEED_100) {
1258 if (bp->req_duplex == DUPLEX_FULL)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262 } else if (bp->req_line_speed == SPEED_10) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1270 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1273 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1275 if (port == PORT_TP)
1276 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1279 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1281 spin_unlock_bh(&bp->phy_lock);
1282 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283 spin_lock_bh(&bp->phy_lock);
1289 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1294 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1295 return (bnx2_setup_remote_phy(bp, port));
1297 if (!(bp->autoneg & AUTONEG_SPEED)) {
1299 int force_link_down = 0;
1301 if (bp->req_line_speed == SPEED_2500) {
1302 if (!bnx2_test_and_enable_2g5(bp))
1303 force_link_down = 1;
1304 } else if (bp->req_line_speed == SPEED_1000) {
1305 if (bnx2_test_and_disable_2g5(bp))
1306 force_link_down = 1;
1308 bnx2_read_phy(bp, bp->mii_adv, &adv);
1309 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1311 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1312 new_bmcr = bmcr & ~BMCR_ANENABLE;
1313 new_bmcr |= BMCR_SPEED1000;
1315 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1316 if (bp->req_line_speed == SPEED_2500)
1317 bnx2_enable_forced_2g5(bp);
1318 else if (bp->req_line_speed == SPEED_1000) {
1319 bnx2_disable_forced_2g5(bp);
1320 new_bmcr &= ~0x2000;
1323 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1324 if (bp->req_line_speed == SPEED_2500)
1325 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1327 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1330 if (bp->req_duplex == DUPLEX_FULL) {
1331 adv |= ADVERTISE_1000XFULL;
1332 new_bmcr |= BMCR_FULLDPLX;
1335 adv |= ADVERTISE_1000XHALF;
1336 new_bmcr &= ~BMCR_FULLDPLX;
1338 if ((new_bmcr != bmcr) || (force_link_down)) {
1339 /* Force a link down visible on the other side */
1341 bnx2_write_phy(bp, bp->mii_adv, adv &
1342 ~(ADVERTISE_1000XFULL |
1343 ADVERTISE_1000XHALF));
1344 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1345 BMCR_ANRESTART | BMCR_ANENABLE);
1348 netif_carrier_off(bp->dev);
1349 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1350 bnx2_report_link(bp);
1352 bnx2_write_phy(bp, bp->mii_adv, adv);
1353 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1355 bnx2_resolve_flow_ctrl(bp);
1356 bnx2_set_mac_link(bp);
1361 bnx2_test_and_enable_2g5(bp);
1363 if (bp->advertising & ADVERTISED_1000baseT_Full)
1364 new_adv |= ADVERTISE_1000XFULL;
1366 new_adv |= bnx2_phy_get_pause_adv(bp);
1368 bnx2_read_phy(bp, bp->mii_adv, &adv);
1369 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1371 bp->serdes_an_pending = 0;
1372 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1373 /* Force a link down visible on the other side */
1375 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1376 spin_unlock_bh(&bp->phy_lock);
1378 spin_lock_bh(&bp->phy_lock);
1381 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1382 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1384 /* Speed up link-up time when the link partner
1385 * does not autonegotiate which is very common
1386 * in blade servers. Some blade servers use
1387	 * IPMI for keyboard input and it's important
1388 * to minimize link disruptions. Autoneg. involves
1389 * exchanging base pages plus 3 next pages and
1390 * normally completes in about 120 msec.
1392 bp->current_interval = SERDES_AN_TIMEOUT;
1393 bp->serdes_an_pending = 1;
1394 mod_timer(&bp->timer, jiffies + bp->current_interval);
1396 bnx2_resolve_flow_ctrl(bp);
1397 bnx2_set_mac_link(bp);
1403 #define ETHTOOL_ALL_FIBRE_SPEED \
1404 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1405 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1406 (ADVERTISED_1000baseT_Full)
1408 #define ETHTOOL_ALL_COPPER_SPEED \
1409 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1410 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1411 ADVERTISED_1000baseT_Full)
1413 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1414 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1416 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1419 bnx2_set_default_remote_link(struct bnx2 *bp)
1423 if (bp->phy_port == PORT_TP)
1424 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1426 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1428 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1429 bp->req_line_speed = 0;
1430 bp->autoneg |= AUTONEG_SPEED;
1431 bp->advertising = ADVERTISED_Autoneg;
1432 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1433 bp->advertising |= ADVERTISED_10baseT_Half;
1434 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1435 bp->advertising |= ADVERTISED_10baseT_Full;
1436 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1437 bp->advertising |= ADVERTISED_100baseT_Half;
1438 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1439 bp->advertising |= ADVERTISED_100baseT_Full;
1440 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1441 bp->advertising |= ADVERTISED_1000baseT_Full;
1442 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1443 bp->advertising |= ADVERTISED_2500baseX_Full;
1446 bp->advertising = 0;
1447 bp->req_duplex = DUPLEX_FULL;
1448 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1449 bp->req_line_speed = SPEED_10;
1450 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1451 bp->req_duplex = DUPLEX_HALF;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1454 bp->req_line_speed = SPEED_100;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1459 bp->req_line_speed = SPEED_1000;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1461 bp->req_line_speed = SPEED_2500;
1466 bnx2_set_default_link(struct bnx2 *bp)
1468 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469 return bnx2_set_default_remote_link(bp);
1471 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472 bp->req_line_speed = 0;
1473 if (bp->phy_flags & PHY_SERDES_FLAG) {
1476 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1478 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1482 bp->req_line_speed = bp->line_speed = SPEED_1000;
1483 bp->req_duplex = DUPLEX_FULL;
1486 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1490 bnx2_remote_phy_event(struct bnx2 *bp)
1493 u8 link_up = bp->link_up;
1496 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1498 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1504 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1505 bp->duplex = DUPLEX_FULL;
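/* The half-duplex cases below intentionally fall through to the
 * matching full-duplex case, which sets the line speed. */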
1507 case BNX2_LINK_STATUS_10HALF:
1508 bp->duplex = DUPLEX_HALF;
1509 case BNX2_LINK_STATUS_10FULL:
1510 bp->line_speed = SPEED_10;
1512 case BNX2_LINK_STATUS_100HALF:
1513 bp->duplex = DUPLEX_HALF;
1514 case BNX2_LINK_STATUS_100BASE_T4:
1515 case BNX2_LINK_STATUS_100FULL:
1516 bp->line_speed = SPEED_100;
1518 case BNX2_LINK_STATUS_1000HALF:
1519 bp->duplex = DUPLEX_HALF;
1520 case BNX2_LINK_STATUS_1000FULL:
1521 bp->line_speed = SPEED_1000;
1523 case BNX2_LINK_STATUS_2500HALF:
1524 bp->duplex = DUPLEX_HALF;
1525 case BNX2_LINK_STATUS_2500FULL:
1526 bp->line_speed = SPEED_2500;
1533 spin_lock(&bp->phy_lock);
1535 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1536 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1537 if (bp->duplex == DUPLEX_FULL)
1538 bp->flow_ctrl = bp->req_flow_ctrl;
1540 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1541 bp->flow_ctrl |= FLOW_CTRL_TX;
1542 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1543 bp->flow_ctrl |= FLOW_CTRL_RX;
1546 old_port = bp->phy_port;
1547 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1548 bp->phy_port = PORT_FIBRE;
1550 bp->phy_port = PORT_TP;
1552 if (old_port != bp->phy_port)
1553 bnx2_set_default_link(bp);
1555 spin_unlock(&bp->phy_lock);
1557 if (bp->link_up != link_up)
1558 bnx2_report_link(bp);
1560 bnx2_set_mac_link(bp);
1564 bnx2_set_remote_link(struct bnx2 *bp)
1568 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1570 case BNX2_FW_EVT_CODE_LINK_EVENT:
1571 bnx2_remote_phy_event(bp);
1573 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1581 bnx2_setup_copper_phy(struct bnx2 *bp)
1586 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1588 if (bp->autoneg & AUTONEG_SPEED) {
1589 u32 adv_reg, adv1000_reg;
1590 u32 new_adv_reg = 0;
1591 u32 new_adv1000_reg = 0;
1593 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1594 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1595 ADVERTISE_PAUSE_ASYM);
1597 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1598 adv1000_reg &= PHY_ALL_1000_SPEED;
1600 if (bp->advertising & ADVERTISED_10baseT_Half)
1601 new_adv_reg |= ADVERTISE_10HALF;
1602 if (bp->advertising & ADVERTISED_10baseT_Full)
1603 new_adv_reg |= ADVERTISE_10FULL;
1604 if (bp->advertising & ADVERTISED_100baseT_Half)
1605 new_adv_reg |= ADVERTISE_100HALF;
1606 if (bp->advertising & ADVERTISED_100baseT_Full)
1607 new_adv_reg |= ADVERTISE_100FULL;
1608 if (bp->advertising & ADVERTISED_1000baseT_Full)
1609 new_adv1000_reg |= ADVERTISE_1000FULL;
1611 new_adv_reg |= ADVERTISE_CSMA;
1613 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1615 if ((adv1000_reg != new_adv1000_reg) ||
1616 (adv_reg != new_adv_reg) ||
1617 ((bmcr & BMCR_ANENABLE) == 0)) {
1619 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1620 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1621 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1624 else if (bp->link_up) {
1625 /* Flow ctrl may have changed from auto to forced */
1626 /* or vice-versa. */
1628 bnx2_resolve_flow_ctrl(bp);
1629 bnx2_set_mac_link(bp);
1635 if (bp->req_line_speed == SPEED_100) {
1636 new_bmcr |= BMCR_SPEED100;
1638 if (bp->req_duplex == DUPLEX_FULL) {
1639 new_bmcr |= BMCR_FULLDPLX;
1641 if (new_bmcr != bmcr) {
1644 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1645 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1647 if (bmsr & BMSR_LSTATUS) {
1648 /* Force link down */
1649 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1650 spin_unlock_bh(&bp->phy_lock);
1652 spin_lock_bh(&bp->phy_lock);
1654 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1655 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1658 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1660 /* Normally, the new speed is setup after the link has
1661 * gone down and up again. In some cases, link will not go
1662 * down so we need to set up the new speed here.
1664 if (bmsr & BMSR_LSTATUS) {
1665 bp->line_speed = bp->req_line_speed;
1666 bp->duplex = bp->req_duplex;
1667 bnx2_resolve_flow_ctrl(bp);
1668 bnx2_set_mac_link(bp);
1671 bnx2_resolve_flow_ctrl(bp);
1672 bnx2_set_mac_link(bp);
1678 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1680 if (bp->loopback == MAC_LOOPBACK)
1683 if (bp->phy_flags & PHY_SERDES_FLAG) {
1684 return (bnx2_setup_serdes_phy(bp, port));
1687 return (bnx2_setup_copper_phy(bp));
1692 bnx2_init_5709s_phy(struct bnx2 *bp)
1696 bp->mii_bmcr = MII_BMCR + 0x10;
1697 bp->mii_bmsr = MII_BMSR + 0x10;
1698 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1699 bp->mii_adv = MII_ADVERTISE + 0x10;
1700 bp->mii_lpa = MII_LPA + 0x10;
1701 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1703 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1704 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1706 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1709 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1711 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1712 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1713 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1714 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1716 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1717 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1718 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1719 val |= BCM5708S_UP1_2G5;
1721 val &= ~BCM5708S_UP1_2G5;
1722 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1725 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1726 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1727 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1729 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1731 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1732 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1733 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1741 bnx2_init_5708s_phy(struct bnx2 *bp)
1747 bp->mii_up1 = BCM5708S_UP1;
1749 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1750 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1751 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1753 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1754 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1755 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1757 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1758 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1759 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1761 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1762 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1763 val |= BCM5708S_UP1_2G5;
1764 bnx2_write_phy(bp, BCM5708S_UP1, val);
1767 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1768 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1769 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1770 /* increase tx signal amplitude */
1771 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1772 BCM5708S_BLK_ADDR_TX_MISC);
1773 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1774 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1775 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1776 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1779 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1780 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1785 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1786 BNX2_SHARED_HW_CFG_CONFIG);
1787 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1788 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1789 BCM5708S_BLK_ADDR_TX_MISC);
1790 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1791 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1792 BCM5708S_BLK_ADDR_DIG);
1799 bnx2_init_5706s_phy(struct bnx2 *bp)
1803 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1805 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1806 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1808 if (bp->dev->mtu > 1500) {
1811 /* Set extended packet length bit */
1812 bnx2_write_phy(bp, 0x18, 0x7);
1813 bnx2_read_phy(bp, 0x18, &val);
1814 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1816 bnx2_write_phy(bp, 0x1c, 0x6c00);
1817 bnx2_read_phy(bp, 0x1c, &val);
1818 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1823 bnx2_write_phy(bp, 0x18, 0x7);
1824 bnx2_read_phy(bp, 0x18, &val);
1825 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1827 bnx2_write_phy(bp, 0x1c, 0x6c00);
1828 bnx2_read_phy(bp, 0x1c, &val);
1829 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
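/* Hedged note: registers 0x18 and 0x1c written above select Broadcom
 * PHY shadow/expansion register banks; the constants are opaque values
 * taken from the PHY documentation rather than named MII registers. */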
1836 bnx2_init_copper_phy(struct bnx2 *bp)
1842 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1843 bnx2_write_phy(bp, 0x18, 0x0c00);
1844 bnx2_write_phy(bp, 0x17, 0x000a);
1845 bnx2_write_phy(bp, 0x15, 0x310b);
1846 bnx2_write_phy(bp, 0x17, 0x201f);
1847 bnx2_write_phy(bp, 0x15, 0x9506);
1848 bnx2_write_phy(bp, 0x17, 0x401f);
1849 bnx2_write_phy(bp, 0x15, 0x14e2);
1850 bnx2_write_phy(bp, 0x18, 0x0400);
1853 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1854 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1855 MII_BNX2_DSP_EXPAND_REG | 0x8);
1856 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1858 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1861 if (bp->dev->mtu > 1500) {
1862 /* Set extended packet length bit */
1863 bnx2_write_phy(bp, 0x18, 0x7);
1864 bnx2_read_phy(bp, 0x18, &val);
1865 bnx2_write_phy(bp, 0x18, val | 0x4000);
1867 bnx2_read_phy(bp, 0x10, &val);
1868 bnx2_write_phy(bp, 0x10, val | 0x1);
1871 bnx2_write_phy(bp, 0x18, 0x7);
1872 bnx2_read_phy(bp, 0x18, &val);
1873 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1875 bnx2_read_phy(bp, 0x10, &val);
1876 bnx2_write_phy(bp, 0x10, val & ~0x1);
1879 /* ethernet@wirespeed */
1880 bnx2_write_phy(bp, 0x18, 0x7007);
1881 bnx2_read_phy(bp, 0x18, &val);
1882 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1888 bnx2_init_phy(struct bnx2 *bp)
1893 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1894 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1896 bp->mii_bmcr = MII_BMCR;
1897 bp->mii_bmsr = MII_BMSR;
1898 bp->mii_bmsr1 = MII_BMSR;
1899 bp->mii_adv = MII_ADVERTISE;
1900 bp->mii_lpa = MII_LPA;
1902 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1904 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1907 bnx2_read_phy(bp, MII_PHYSID1, &val);
1908 bp->phy_id = val << 16;
1909 bnx2_read_phy(bp, MII_PHYSID2, &val);
1910 bp->phy_id |= val & 0xffff;
1912 if (bp->phy_flags & PHY_SERDES_FLAG) {
1913 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1914 rc = bnx2_init_5706s_phy(bp);
1915 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1916 rc = bnx2_init_5708s_phy(bp);
1917 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1918 rc = bnx2_init_5709s_phy(bp);
1921 rc = bnx2_init_copper_phy(bp);
1926 rc = bnx2_setup_phy(bp, bp->phy_port);
1932 bnx2_set_mac_loopback(struct bnx2 *bp)
1936 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1937 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1938 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1939 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1944 static int bnx2_test_link(struct bnx2 *);
1947 bnx2_set_phy_loopback(struct bnx2 *bp)
1952 spin_lock_bh(&bp->phy_lock);
1953 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1955 spin_unlock_bh(&bp->phy_lock);
1959 for (i = 0; i < 10; i++) {
1960 if (bnx2_test_link(bp) == 0)
1965 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1966 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1967 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1968 BNX2_EMAC_MODE_25G_MODE);
1970 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1971 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1977 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1983 msg_data |= bp->fw_wr_seq;
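/* The sequence bits OR'ed in above are echoed back by the firmware
 * in BNX2_FW_MB; matching BNX2_FW_MSG_ACK against them is what
 * terminates the poll loop below. */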
1985 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1987 /* wait for an acknowledgement. */
1988 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1991 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1993 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1996 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1999 /* If we timed out, inform the firmware that this is the case. */
2000 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2002 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2005 msg_data &= ~BNX2_DRV_MSG_CODE;
2006 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2008 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2013 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2020 bnx2_init_5709_context(struct bnx2 *bp)
2025 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2026 val |= (BCM_PAGE_BITS - 8) << 16;
2027 REG_WR(bp, BNX2_CTX_COMMAND, val);
2028 for (i = 0; i < 10; i++) {
2029 val = REG_RD(bp, BNX2_CTX_COMMAND);
2030 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2034 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2037 for (i = 0; i < bp->ctx_pages; i++) {
2040 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2041 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2042 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2043 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2044 (u64) bp->ctx_blk_mapping[i] >> 32);
2045 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2046 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2047 for (j = 0; j < 10; j++) {
2049 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2050 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2054 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2063 bnx2_init_context(struct bnx2 *bp)
2069 u32 vcid_addr, pcid_addr, offset;
2074 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2077 vcid_addr = GET_PCID_ADDR(vcid);
2079 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2084 pcid_addr = GET_PCID_ADDR(new_vcid);
2087 vcid_addr = GET_CID_ADDR(vcid);
2088 pcid_addr = vcid_addr;
2091 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2092 vcid_addr += (i << PHY_CTX_SHIFT);
2093 pcid_addr += (i << PHY_CTX_SHIFT);
2095 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2096 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2098 /* Zero out the context. */
2099 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2100 CTX_WR(bp, 0x00, offset, 0);
2102 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2103 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2109 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2115 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2116 if (good_mbuf == NULL) {
2117 printk(KERN_ERR PFX "Failed to allocate memory in "
2118 "bnx2_alloc_bad_rbuf\n");
2122 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2123 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2127 /* Allocate a bunch of mbufs and save the good ones in an array. */
2128 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2129 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2130 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2132 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2134 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2136 /* The addresses with Bit 9 set are bad memory blocks. */
2137 if (!(val & (1 << 9))) {
2138 good_mbuf[good_mbuf_cnt] = (u16) val;
2142 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2145 /* Free the good ones back to the mbuf pool thus discarding
2146 * all the bad ones. */
2147 while (good_mbuf_cnt) {
2150 val = good_mbuf[good_mbuf_cnt];
2151 val = (val << 9) | val | 1;
2153 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
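/* Hedged reading of the free-command encoding written above: the mbuf
 * cluster number appears in both halves of the word plus a valid bit,
 * e.g. cluster 0x0a -> (0x0a << 9) | 0x0a | 1 == 0x140b. */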
2160 bnx2_set_mac_addr(struct bnx2 *bp)
2163 u8 *mac_addr = bp->dev->dev_addr;
2165 val = (mac_addr[0] << 8) | mac_addr[1];
2167 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2169 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2170 (mac_addr[4] << 8) | mac_addr[5];
2172 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
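/* Example: for MAC address 00:10:18:aa:bb:cc the registers end up as
 * MAC_MATCH0 = 0x00000010 (top two octets) and
 * MAC_MATCH1 = 0x18aabbcc (remaining four octets). */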
2176 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2178 struct sk_buff *skb;
2179 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2181 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2182 unsigned long align;
2184 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2189 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2190 skb_reserve(skb, BNX2_RX_ALIGN - align);
2192 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2193 PCI_DMA_FROMDEVICE);
2196 pci_unmap_addr_set(rx_buf, mapping, mapping);
2198 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2199 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2201 bp->rx_prod_bseq += bp->rx_buf_use_size;
2207 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2209 struct status_block *sblk = bp->status_blk;
2210 u32 new_link_state, old_link_state;
2213 new_link_state = sblk->status_attn_bits & event;
2214 old_link_state = sblk->status_attn_bits_ack & event;
2215 if (new_link_state != old_link_state) {
2217 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2219 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2227 bnx2_phy_int(struct bnx2 *bp)
2229 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2230 spin_lock(&bp->phy_lock);
2232 spin_unlock(&bp->phy_lock);
2234 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2235 bnx2_set_remote_link(bp);
2240 bnx2_tx_int(struct bnx2 *bp)
2242 struct status_block *sblk = bp->status_blk;
2243 u16 hw_cons, sw_cons, sw_ring_cons;
2246 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2247 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2250 sw_cons = bp->tx_cons;
2252 while (sw_cons != hw_cons) {
2253 struct sw_bd *tx_buf;
2254 struct sk_buff *skb;
2257 sw_ring_cons = TX_RING_IDX(sw_cons);
2259 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2262 /* partial BD completions possible with TSO packets */
2263 if (skb_is_gso(skb)) {
2264 u16 last_idx, last_ring_idx;
2266 last_idx = sw_cons +
2267 skb_shinfo(skb)->nr_frags + 1;
2268 last_ring_idx = sw_ring_cons +
2269 skb_shinfo(skb)->nr_frags + 1;
2270 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2273 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2278 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2279 skb_headlen(skb), PCI_DMA_TODEVICE);
2282 last = skb_shinfo(skb)->nr_frags;
2284 for (i = 0; i < last; i++) {
2285 sw_cons = NEXT_TX_BD(sw_cons);
2287 pci_unmap_page(bp->pdev,
2289 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2291 skb_shinfo(skb)->frags[i].size,
2295 sw_cons = NEXT_TX_BD(sw_cons);
2297 tx_free_bd += last + 1;
2301 hw_cons = bp->hw_tx_cons =
2302 sblk->status_tx_quick_consumer_index0;
2304 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2309 bp->tx_cons = sw_cons;
2310 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2311 * before checking for netif_queue_stopped(). Without the
2312 * memory barrier, there is a small possibility that bnx2_start_xmit()
2313 * will miss it and cause the queue to be stopped forever.
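	 */

	/* The barrier the comment above calls for (assumed primitive: a
	 * full smp_mb(), pairing with an equivalent barrier in the xmit
	 * path). */
	smp_mb();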
2317 if (unlikely(netif_queue_stopped(bp->dev)) &&
2318 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2319 netif_tx_lock(bp->dev);
2320 if ((netif_queue_stopped(bp->dev)) &&
2321 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2322 netif_wake_queue(bp->dev);
2323 netif_tx_unlock(bp->dev);
2328 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2331 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2332 struct rx_bd *cons_bd, *prod_bd;
2334 cons_rx_buf = &bp->rx_buf_ring[cons];
2335 prod_rx_buf = &bp->rx_buf_ring[prod];
2337 pci_dma_sync_single_for_device(bp->pdev,
2338 pci_unmap_addr(cons_rx_buf, mapping),
2339 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2341 bp->rx_prod_bseq += bp->rx_buf_use_size;
2343 prod_rx_buf->skb = skb;
2348 pci_unmap_addr_set(prod_rx_buf, mapping,
2349 pci_unmap_addr(cons_rx_buf, mapping));
2351 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2352 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2353 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2354 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
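/* Recycling hands the still-mapped skb from the consumer slot to the
 * producer slot (skb pointer, DMA cookie, and BD address are all
 * copied), so a dropped or copied packet costs no new allocation. */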
2358 bnx2_rx_int(struct bnx2 *bp, int budget)
2360 struct status_block *sblk = bp->status_blk;
2361 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2362 struct l2_fhdr *rx_hdr;
2365 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2366 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2369 sw_cons = bp->rx_cons;
2370 sw_prod = bp->rx_prod;
2372 /* Memory barrier necessary as speculative reads of the rx
2373 * buffer can be ahead of the index in the status block
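	 */

	/* Read barrier matching the comment above (assumed primitive:
	 * rmb()). */
	rmb();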
2376 while (sw_cons != hw_cons) {
2379 struct sw_bd *rx_buf;
2380 struct sk_buff *skb;
2381 dma_addr_t dma_addr;
2383 sw_ring_cons = RX_RING_IDX(sw_cons);
2384 sw_ring_prod = RX_RING_IDX(sw_prod);
2386 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2391 dma_addr = pci_unmap_addr(rx_buf, mapping);
2393 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2394 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2396 rx_hdr = (struct l2_fhdr *) skb->data;
2397 len = rx_hdr->l2_fhdr_pkt_len - 4;
2399 if ((status = rx_hdr->l2_fhdr_status) &
2400 (L2_FHDR_ERRORS_BAD_CRC |
2401 L2_FHDR_ERRORS_PHY_DECODE |
2402 L2_FHDR_ERRORS_ALIGNMENT |
2403 L2_FHDR_ERRORS_TOO_SHORT |
2404 L2_FHDR_ERRORS_GIANT_FRAME)) {
2409 /* Since we don't have a jumbo ring, copy small packets
2412 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2413 struct sk_buff *new_skb;
2415 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2416 if (new_skb == NULL)
2420 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2421 new_skb->data, len + 2);
2422 skb_reserve(new_skb, 2);
2423 skb_put(new_skb, len);
2425 bnx2_reuse_rx_skb(bp, skb,
2426 sw_ring_cons, sw_ring_prod);
2430 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2431 pci_unmap_single(bp->pdev, dma_addr,
2432 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2434 skb_reserve(skb, bp->rx_offset);
2439 bnx2_reuse_rx_skb(bp, skb,
2440 sw_ring_cons, sw_ring_prod);
2444 skb->protocol = eth_type_trans(skb, bp->dev);
2446 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2447 (ntohs(skb->protocol) != 0x8100)) {
2454 skb->ip_summed = CHECKSUM_NONE;
2456 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2457 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2459 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2460 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2461 skb->ip_summed = CHECKSUM_UNNECESSARY;
2465 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2466 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2467 rx_hdr->l2_fhdr_vlan_tag);
2471 netif_receive_skb(skb);
2473 bp->dev->last_rx = jiffies;
2475 next_rx:
2476 rx_pkt++;
2477 sw_cons = NEXT_RX_BD(sw_cons);
2478 sw_prod = NEXT_RX_BD(sw_prod);
2480 if (rx_pkt == budget)
2481 break;
2483 /* Refresh hw_cons to see if there is new work */
2484 if (sw_cons == hw_cons) {
2485 hw_cons = bp->hw_rx_cons =
2486 sblk->status_rx_quick_consumer_index0;
2487 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2488 hw_cons++;
2492 bp->rx_cons = sw_cons;
2493 bp->rx_prod = sw_prod;
2495 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2497 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2499 mmiowb();
2501 return rx_pkt;
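/*
 * Editorial note: sw_cons and sw_prod above are free-running 16-bit
 * indices.  RX_RING_IDX() folds them onto the multi-page ring, and
 * NEXT_RX_BD() advances by 2 instead of 1 when the next slot would be
 * the last descriptor of a page, since that slot holds the pointer
 * chaining to the next page (see the authoritative macros in bnx2.h).
 */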
2505 /* MSI ISR - The only difference between this and the INTx ISR
2506 * is that the MSI interrupt is always serviced.
2509 bnx2_msi(int irq, void *dev_instance)
2511 struct net_device *dev = dev_instance;
2512 struct bnx2 *bp = netdev_priv(dev);
2514 prefetch(bp->status_blk);
2515 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2516 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2517 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2519 /* Return here if interrupt is disabled. */
2520 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2521 return IRQ_HANDLED;
2523 netif_rx_schedule(dev);
2525 return IRQ_HANDLED;
2529 bnx2_msi_1shot(int irq, void *dev_instance)
2531 struct net_device *dev = dev_instance;
2532 struct bnx2 *bp = netdev_priv(dev);
2534 prefetch(bp->status_blk);
2536 /* Return here if interrupt is disabled. */
2537 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2538 return IRQ_HANDLED;
2540 netif_rx_schedule(dev);
2542 return IRQ_HANDLED;
2546 bnx2_interrupt(int irq, void *dev_instance)
2548 struct net_device *dev = dev_instance;
2549 struct bnx2 *bp = netdev_priv(dev);
2551 /* When using INTx, it is possible for the interrupt to arrive
2552 * at the CPU before the status block write that preceded it has
2553 * been posted. Reading a register will flush the status block.
2554 * When using MSI, the MSI message will always complete after
2555 * the status block write.
2557 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2558 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2559 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2560 return IRQ_NONE;
2562 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2563 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2564 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2566 /* Return here if interrupt is shared and is disabled. */
2567 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2568 return IRQ_HANDLED;
2570 netif_rx_schedule(dev);
2572 return IRQ_HANDLED;
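/*
 * Editorial note: the IRQ_NONE early-out above matters because INTx
 * lines can be shared; when neither the status index nor the INTA bit
 * says the event is ours, returning IRQ_NONE lets the kernel offer the
 * interrupt to the other devices on the line.  All paths that claim
 * the interrupt return IRQ_HANDLED.
 */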
2575 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2576 STATUS_ATTN_BITS_TIMER_ABORT)
2579 bnx2_has_work(struct bnx2 *bp)
2581 struct status_block *sblk = bp->status_blk;
2583 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2584 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2585 return 1;
2587 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2588 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2589 return 1;
2591 return 0;
2595 bnx2_poll(struct net_device *dev, int *budget)
2597 struct bnx2 *bp = netdev_priv(dev);
2598 struct status_block *sblk = bp->status_blk;
2599 u32 status_attn_bits = sblk->status_attn_bits;
2600 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2602 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2603 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2605 bnx2_phy_int(bp);
2607 /* This is needed to take care of transient status
2608 * during link changes.
2610 REG_WR(bp, BNX2_HC_COMMAND,
2611 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2612 REG_RD(bp, BNX2_HC_COMMAND);
2615 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2616 bnx2_tx_int(bp);
2618 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2619 int orig_budget = *budget;
2620 int work_done;
2622 if (orig_budget > dev->quota)
2623 orig_budget = dev->quota;
2625 work_done = bnx2_rx_int(bp, orig_budget);
2626 *budget -= work_done;
2627 dev->quota -= work_done;
2630 bp->last_status_idx = bp->status_blk->status_idx;
2633 if (!bnx2_has_work(bp)) {
2634 netif_rx_complete(dev);
2635 if (likely(bp->flags & USING_MSI_FLAG)) {
2636 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2637 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2638 bp->last_status_idx);
2639 return 0;
2641 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2642 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2643 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2644 bp->last_status_idx);
2646 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2647 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2648 bp->last_status_idx);
2649 return 0;
2652 return 1;
2655 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2656 * from set_multicast.
2659 bnx2_set_rx_mode(struct net_device *dev)
2661 struct bnx2 *bp = netdev_priv(dev);
2662 u32 rx_mode, sort_mode;
2663 int i;
2665 spin_lock_bh(&bp->phy_lock);
2667 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2668 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2669 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2670 #ifdef BCM_VLAN
2671 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2672 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2673 #else
2674 if (!(bp->flags & ASF_ENABLE_FLAG))
2675 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2676 #endif
2677 if (dev->flags & IFF_PROMISC) {
2678 /* Promiscuous mode. */
2679 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2680 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2681 BNX2_RPM_SORT_USER0_PROM_VLAN;
2683 else if (dev->flags & IFF_ALLMULTI) {
2684 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2685 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2686 0xffffffff);
2688 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2691 /* Accept one or more multicast(s). */
2692 struct dev_mc_list *mclist;
2693 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2694 u32 regidx, bit, crc;
2698 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2700 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2701 i++, mclist = mclist->next) {
2703 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2704 bit = crc & 0xff;
2705 regidx = (bit & 0xe0) >> 5;
2706 bit &= 0x1f;
2707 mc_filter[regidx] |= (1 << bit);
2710 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2711 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2712 mc_filter[i]);
2715 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2718 if (rx_mode != bp->rx_mode) {
2719 bp->rx_mode = rx_mode;
2720 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2723 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2724 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2725 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2727 spin_unlock_bh(&bp->phy_lock);
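/*
 * Editorial sketch (hypothetical helper, not in the original file):
 * the multicast filter above maps the little-endian Ethernet CRC of
 * each address onto NUM_MC_HASH_REGISTERS x 32 filter bits -- CRC
 * bits 7:5 select the register, bits 4:0 the bit within it.
 */
static inline void bnx2_mc_hash_set_sketch(u32 crc, u32 *mc_filter)
{
	u32 bit = crc & 0xff;		/* low byte of the CRC */
	u32 regidx = (bit & 0xe0) >> 5;	/* bits 7:5 -> register 0..7 */

	bit &= 0x1f;			/* bits 4:0 -> bit 0..31 */
	mc_filter[regidx] |= (1 << bit);
}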
2730 #define FW_BUF_SIZE 0x8000
2733 bnx2_gunzip_init(struct bnx2 *bp)
2735 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2736 goto gunzip_nomem1;
2738 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2739 goto gunzip_nomem2;
2741 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2742 if (bp->strm->workspace == NULL)
2743 goto gunzip_nomem3;
2745 return 0;
2747 gunzip_nomem3:
2748 kfree(bp->strm);
2749 bp->strm = NULL;
2751 gunzip_nomem2:
2752 vfree(bp->gunzip_buf);
2753 bp->gunzip_buf = NULL;
2755 gunzip_nomem1:
2756 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2757 "uncompression.\n", bp->dev->name);
2758 return -ENOMEM;
2762 bnx2_gunzip_end(struct bnx2 *bp)
2764 kfree(bp->strm->workspace);
2766 kfree(bp->strm);
2767 bp->strm = NULL;
2769 if (bp->gunzip_buf) {
2770 vfree(bp->gunzip_buf);
2771 bp->gunzip_buf = NULL;
2776 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2780 /* check gzip header */
2781 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2782 return -EINVAL;
2784 n = 10;
2786 #define FNAME 0x8
2787 if (zbuf[3] & FNAME)
2788 while ((zbuf[n++] != 0) && (n < len));
2790 bp->strm->next_in = zbuf + n;
2791 bp->strm->avail_in = len - n;
2792 bp->strm->next_out = bp->gunzip_buf;
2793 bp->strm->avail_out = FW_BUF_SIZE;
2795 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2796 if (rc != Z_OK)
2797 return rc;
2799 rc = zlib_inflate(bp->strm, Z_FINISH);
2801 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2802 *outbuf = bp->gunzip_buf;
2804 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2805 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2806 bp->dev->name, bp->strm->msg);
2808 zlib_inflateEnd(bp->strm);
2810 if (rc == Z_STREAM_END)
2811 return 0;
2813 return -EINVAL;
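/*
 * Editorial sketch of a typical call sequence (see load_cpu_fw() and
 * bnx2_init_cpus() below); the output stays in bp->gunzip_buf until
 * the next bnx2_gunzip() call:
 *
 *	void *text;
 *	u32 text_len;
 *
 *	rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
 *			 &text_len);
 *	if (rc == 0)
 *		... consume text_len bytes at text ...
 */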
2817 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2824 for (i = 0; i < rv2p_code_len; i += 8) {
2825 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2826 rv2p_code++;
2827 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2828 rv2p_code++;
2830 if (rv2p_proc == RV2P_PROC1) {
2831 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2832 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2835 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2836 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2840 /* Reset the processor, un-stall is done later. */
2841 if (rv2p_proc == RV2P_PROC1) {
2842 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2845 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2850 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2857 val = REG_RD_IND(bp, cpu_reg->mode);
2858 val |= cpu_reg->mode_value_halt;
2859 REG_WR_IND(bp, cpu_reg->mode, val);
2860 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2862 /* Load the Text area. */
2863 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2868 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2869 &text_len);
2870 if (rc)
2871 return rc;
2873 fw->text = text;
2878 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2879 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2883 /* Load the Data area. */
2884 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2888 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2889 REG_WR_IND(bp, offset, fw->data[j]);
2893 /* Load the SBSS area. */
2894 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2898 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2899 REG_WR_IND(bp, offset, fw->sbss[j]);
2903 /* Load the BSS area. */
2904 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2908 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2909 REG_WR_IND(bp, offset, fw->bss[j]);
2913 /* Load the Read-Only area. */
2914 offset = cpu_reg->spad_base +
2915 (fw->rodata_addr - cpu_reg->mips_view_base);
2919 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2920 REG_WR_IND(bp, offset, fw->rodata[j]);
2924 /* Clear the pre-fetch instruction. */
2925 REG_WR_IND(bp, cpu_reg->inst, 0);
2926 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2928 /* Start the CPU. */
2929 val = REG_RD_IND(bp, cpu_reg->mode);
2930 val &= ~cpu_reg->mode_value_halt;
2931 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2932 REG_WR_IND(bp, cpu_reg->mode, val);
2938 bnx2_init_cpus(struct bnx2 *bp)
2940 struct cpu_reg cpu_reg;
2941 struct fw_info *fw;
2942 int rc;
2943 void *text;
2944 u32 text_len;
2946 if ((rc = bnx2_gunzip_init(bp)) != 0)
2947 return rc;
2949 /* Initialize the RV2P processor. */
2950 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2951 &text_len);
2952 if (rc)
2953 goto init_cpu_err;
2955 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2957 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2958 &text_len);
2959 if (rc)
2960 goto init_cpu_err;
2962 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2964 /* Initialize the RX Processor. */
2965 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2966 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2967 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2968 cpu_reg.state = BNX2_RXP_CPU_STATE;
2969 cpu_reg.state_value_clear = 0xffffff;
2970 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2971 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2972 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2973 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2974 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2975 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2976 cpu_reg.mips_view_base = 0x8000000;
2978 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2979 fw = &bnx2_rxp_fw_09;
2981 fw = &bnx2_rxp_fw_06;
2983 rc = load_cpu_fw(bp, &cpu_reg, fw);
2984 if (rc)
2985 goto init_cpu_err;
2987 /* Initialize the TX Processor. */
2988 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2989 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2990 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2991 cpu_reg.state = BNX2_TXP_CPU_STATE;
2992 cpu_reg.state_value_clear = 0xffffff;
2993 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2994 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2995 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2996 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2997 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2998 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2999 cpu_reg.mips_view_base = 0x8000000;
3001 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3002 fw = &bnx2_txp_fw_09;
3004 fw = &bnx2_txp_fw_06;
3006 rc = load_cpu_fw(bp, &cpu_reg, fw);
3007 if (rc)
3008 goto init_cpu_err;
3010 /* Initialize the TX Patch-up Processor. */
3011 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3012 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3013 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3014 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3015 cpu_reg.state_value_clear = 0xffffff;
3016 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3017 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3018 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3019 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3020 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3021 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3022 cpu_reg.mips_view_base = 0x8000000;
3024 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3025 fw = &bnx2_tpat_fw_09;
3027 fw = &bnx2_tpat_fw_06;
3029 rc = load_cpu_fw(bp, &cpu_reg, fw);
3030 if (rc)
3031 goto init_cpu_err;
3033 /* Initialize the Completion Processor. */
3034 cpu_reg.mode = BNX2_COM_CPU_MODE;
3035 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3036 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3037 cpu_reg.state = BNX2_COM_CPU_STATE;
3038 cpu_reg.state_value_clear = 0xffffff;
3039 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3040 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3041 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3042 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3043 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3044 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3045 cpu_reg.mips_view_base = 0x8000000;
3047 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3048 fw = &bnx2_com_fw_09;
3050 fw = &bnx2_com_fw_06;
3052 rc = load_cpu_fw(bp, &cpu_reg, fw);
3053 if (rc)
3054 goto init_cpu_err;
3056 /* Initialize the Command Processor. */
3057 cpu_reg.mode = BNX2_CP_CPU_MODE;
3058 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3059 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3060 cpu_reg.state = BNX2_CP_CPU_STATE;
3061 cpu_reg.state_value_clear = 0xffffff;
3062 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3063 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3064 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3065 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3066 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3067 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3068 cpu_reg.mips_view_base = 0x8000000;
3070 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3071 fw = &bnx2_cp_fw_09;
3073 rc = load_cpu_fw(bp, &cpu_reg, fw);
3077 init_cpu_err:
3078 bnx2_gunzip_end(bp);
3080 return rc;
3083 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3087 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3093 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3094 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3095 PCI_PM_CTRL_PME_STATUS);
3097 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3098 /* delay required during transition out of D3hot */
3099 msleep(20);
3101 val = REG_RD(bp, BNX2_EMAC_MODE);
3102 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3103 val &= ~BNX2_EMAC_MODE_MPKT;
3104 REG_WR(bp, BNX2_EMAC_MODE, val);
3106 val = REG_RD(bp, BNX2_RPM_CONFIG);
3107 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3108 REG_WR(bp, BNX2_RPM_CONFIG, val);
3119 autoneg = bp->autoneg;
3120 advertising = bp->advertising;
3122 bp->autoneg = AUTONEG_SPEED;
3123 bp->advertising = ADVERTISED_10baseT_Half |
3124 ADVERTISED_10baseT_Full |
3125 ADVERTISED_100baseT_Half |
3126 ADVERTISED_100baseT_Full |
3129 bnx2_setup_copper_phy(bp);
3131 bp->autoneg = autoneg;
3132 bp->advertising = advertising;
3134 bnx2_set_mac_addr(bp);
3136 val = REG_RD(bp, BNX2_EMAC_MODE);
3138 /* Enable port mode. */
3139 val &= ~BNX2_EMAC_MODE_PORT;
3140 val |= BNX2_EMAC_MODE_PORT_MII |
3141 BNX2_EMAC_MODE_MPKT_RCVD |
3142 BNX2_EMAC_MODE_ACPI_RCVD |
3143 BNX2_EMAC_MODE_MPKT;
3145 REG_WR(bp, BNX2_EMAC_MODE, val);
3147 /* receive all multicast */
3148 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3149 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3152 REG_WR(bp, BNX2_EMAC_RX_MODE,
3153 BNX2_EMAC_RX_MODE_SORT_MODE);
3155 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3156 BNX2_RPM_SORT_USER0_MC_EN;
3157 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3158 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3159 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3160 BNX2_RPM_SORT_USER0_ENA);
3162 /* Need to enable EMAC and RPM for WOL. */
3163 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3164 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3165 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3166 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3168 val = REG_RD(bp, BNX2_RPM_CONFIG);
3169 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3170 REG_WR(bp, BNX2_RPM_CONFIG, val);
3172 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3175 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3178 if (!(bp->flags & NO_WOL_FLAG))
3179 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3181 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3182 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3183 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3192 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3194 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3197 /* No more memory access after this point until
3198 * device is brought back to D0.
3210 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3215 /* Request access to the flash interface. */
3216 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3217 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3218 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3219 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3220 break;
3222 udelay(5);
3225 if (j >= NVRAM_TIMEOUT_COUNT)
3226 return -EBUSY;
3228 return 0;
3232 bnx2_release_nvram_lock(struct bnx2 *bp)
3237 /* Relinquish nvram interface. */
3238 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3240 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3241 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3242 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3243 break;
3245 udelay(5);
3248 if (j >= NVRAM_TIMEOUT_COUNT)
3249 return -EBUSY;
3251 return 0;
3256 bnx2_enable_nvram_write(struct bnx2 *bp)
3260 val = REG_RD(bp, BNX2_MISC_CFG);
3261 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3263 if (!bp->flash_info->buffered) {
3266 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3267 REG_WR(bp, BNX2_NVM_COMMAND,
3268 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3270 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3273 val = REG_RD(bp, BNX2_NVM_COMMAND);
3274 if (val & BNX2_NVM_COMMAND_DONE)
3275 break;
3278 if (j >= NVRAM_TIMEOUT_COUNT)
3279 return -EBUSY;
3281 return 0;
3285 bnx2_disable_nvram_write(struct bnx2 *bp)
3289 val = REG_RD(bp, BNX2_MISC_CFG);
3290 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3295 bnx2_enable_nvram_access(struct bnx2 *bp)
3299 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3300 /* Enable both bits, even on read. */
3301 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3302 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3306 bnx2_disable_nvram_access(struct bnx2 *bp)
3310 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3311 /* Disable both bits, even after read. */
3312 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3313 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3314 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3318 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3323 if (bp->flash_info->buffered)
3324 /* Buffered flash, no erase needed */
3325 return 0;
3327 /* Build an erase command */
3328 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3329 BNX2_NVM_COMMAND_DOIT;
3331 /* Need to clear DONE bit separately. */
3332 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3334 /* Address of the NVRAM to read from. */
3335 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3337 /* Issue an erase command. */
3338 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3340 /* Wait for completion. */
3341 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3346 val = REG_RD(bp, BNX2_NVM_COMMAND);
3347 if (val & BNX2_NVM_COMMAND_DONE)
3348 break;
3351 if (j >= NVRAM_TIMEOUT_COUNT)
3352 return -EBUSY;
3354 return 0;
3358 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3363 /* Build the command word. */
3364 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3366 /* Calculate an offset of a buffered flash. */
3367 if (bp->flash_info->buffered) {
3368 offset = ((offset / bp->flash_info->page_size) <<
3369 bp->flash_info->page_bits) +
3370 (offset % bp->flash_info->page_size);
3373 /* Need to clear DONE bit separately. */
3374 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3376 /* Address of the NVRAM to read from. */
3377 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3379 /* Issue a read command. */
3380 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3382 /* Wait for completion. */
3383 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3388 val = REG_RD(bp, BNX2_NVM_COMMAND);
3389 if (val & BNX2_NVM_COMMAND_DONE) {
3390 val = REG_RD(bp, BNX2_NVM_READ);
3392 val = be32_to_cpu(val);
3393 memcpy(ret_val, &val, 4);
3394 break;
3397 if (j >= NVRAM_TIMEOUT_COUNT)
3398 return -EBUSY;
3400 return 0;
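/*
 * Editorial sketch: BNX2_NVM_COMMAND_FIRST/_LAST bracket one NVRAM
 * access sequence, so an aligned two-dword read looks like this
 * (illustrative; bnx2_nvram_read() further below generalizes it to
 * unaligned offsets and lengths):
 *
 *	u8 data[8];
 *
 *	rc = bnx2_nvram_read_dword(bp, offset, data,
 *				   BNX2_NVM_COMMAND_FIRST);
 *	if (rc == 0)
 *		rc = bnx2_nvram_read_dword(bp, offset + 4, data + 4,
 *					   BNX2_NVM_COMMAND_LAST);
 */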
3405 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3410 /* Build the command word. */
3411 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3413 /* Calculate an offset of a buffered flash. */
3414 if (bp->flash_info->buffered) {
3415 offset = ((offset / bp->flash_info->page_size) <<
3416 bp->flash_info->page_bits) +
3417 (offset % bp->flash_info->page_size);
3420 /* Need to clear DONE bit separately. */
3421 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3423 memcpy(&val32, val, 4);
3424 val32 = cpu_to_be32(val32);
3426 /* Write the data. */
3427 REG_WR(bp, BNX2_NVM_WRITE, val32);
3429 /* Address of the NVRAM to write to. */
3430 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3432 /* Issue the write command. */
3433 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3435 /* Wait for completion. */
3436 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3439 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3440 break;
3442 if (j >= NVRAM_TIMEOUT_COUNT)
3443 return -EBUSY;
3445 return 0;
3449 bnx2_init_nvram(struct bnx2 *bp)
3452 int j, entry_count, rc;
3453 struct flash_spec *flash;
3455 /* Determine the selected interface. */
3456 val = REG_RD(bp, BNX2_NVM_CFG1);
3458 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3461 if (val & 0x40000000) {
3463 /* Flash interface has been reconfigured */
3464 for (j = 0, flash = &flash_table[0]; j < entry_count;
3466 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3467 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3468 bp->flash_info = flash;
3475 /* Not yet been reconfigured */
3477 if (val & (1 << 23))
3478 mask = FLASH_BACKUP_STRAP_MASK;
3480 mask = FLASH_STRAP_MASK;
3482 for (j = 0, flash = &flash_table[0]; j < entry_count;
3485 if ((val & mask) == (flash->strapping & mask)) {
3486 bp->flash_info = flash;
3488 /* Request access to the flash interface. */
3489 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3492 /* Enable access to flash interface */
3493 bnx2_enable_nvram_access(bp);
3495 /* Reconfigure the flash interface */
3496 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3497 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3498 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3499 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3501 /* Disable access to flash interface */
3502 bnx2_disable_nvram_access(bp);
3503 bnx2_release_nvram_lock(bp);
3508 } /* if (val & 0x40000000) */
3510 if (j == entry_count) {
3511 bp->flash_info = NULL;
3512 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3513 return -ENODEV;
3516 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3517 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3518 if (val)
3519 bp->flash_size = val;
3520 else
3521 bp->flash_size = bp->flash_info->total_size;
3523 return rc;
3527 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3531 u32 cmd_flags, offset32, len32, extra;
3536 /* Request access to the flash interface. */
3537 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3540 /* Enable access to flash interface */
3541 bnx2_enable_nvram_access(bp);
3554 pre_len = 4 - (offset & 3);
3556 if (pre_len >= len32) {
3557 pre_len = len32;
3558 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3559 BNX2_NVM_COMMAND_LAST;
3561 else
3562 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3565 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3567 if (rc)
3568 return rc;
3570 memcpy(ret_buf, buf + (offset & 3), pre_len);
3572 offset32 += 4;
3573 ret_buf += pre_len;
3574 len32 -= pre_len;
3576 if (len32 & 3) {
3577 extra = 4 - (len32 & 3);
3578 len32 = (len32 + 4) & ~3;
3581 if (len32 == 4) {
3584 if (cmd_flags)
3585 cmd_flags = BNX2_NVM_COMMAND_LAST;
3586 else
3587 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3588 BNX2_NVM_COMMAND_LAST;
3590 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3592 memcpy(ret_buf, buf, 4 - extra);
3594 else if (len32 > 0) {
3597 /* Read the first word. */
3601 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3603 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3605 /* Advance to the next dword. */
3606 offset32 += 4;
3607 ret_buf += 4;
3608 len32 -= 4;
3610 while (len32 > 4 && rc == 0) {
3611 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3613 /* Advance to the next dword. */
3614 offset32 += 4;
3615 ret_buf += 4;
3616 len32 -= 4;
3622 cmd_flags = BNX2_NVM_COMMAND_LAST;
3623 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3625 memcpy(ret_buf, buf, 4 - extra);
3628 /* Disable access to flash interface */
3629 bnx2_disable_nvram_access(bp);
3631 bnx2_release_nvram_lock(bp);
3633 return rc;
3637 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3640 u32 written, offset32, len32;
3641 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3643 int align_start, align_end;
3645 buf = data_buf;
3646 offset32 = offset;
3647 len32 = buf_size;
3648 align_start = align_end = 0;
3650 if ((align_start = (offset32 & 3))) {
3652 len32 += align_start;
3655 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3660 align_end = 4 - (len32 & 3);
3662 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3666 if (align_start || align_end) {
3667 align_buf = kmalloc(len32, GFP_KERNEL);
3668 if (align_buf == NULL)
3669 return -ENOMEM;
3671 memcpy(align_buf, start, 4);
3674 memcpy(align_buf + len32 - 4, end, 4);
3676 memcpy(align_buf + align_start, data_buf, buf_size);
3677 buf = align_buf;
3680 if (bp->flash_info->buffered == 0) {
3681 flash_buffer = kmalloc(264, GFP_KERNEL);
3682 if (flash_buffer == NULL) {
3683 rc = -ENOMEM;
3684 goto nvram_write_end;
3689 while ((written < len32) && (rc == 0)) {
3690 u32 page_start, page_end, data_start, data_end;
3691 u32 addr, cmd_flags;
3694 /* Find the page_start addr */
3695 page_start = offset32 + written;
3696 page_start -= (page_start % bp->flash_info->page_size);
3697 /* Find the page_end addr */
3698 page_end = page_start + bp->flash_info->page_size;
3699 /* Find the data_start addr */
3700 data_start = (written == 0) ? offset32 : page_start;
3701 /* Find the data_end addr */
3702 data_end = (page_end > offset32 + len32) ?
3703 (offset32 + len32) : page_end;
3705 /* Request access to the flash interface. */
3706 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3707 goto nvram_write_end;
3709 /* Enable access to flash interface */
3710 bnx2_enable_nvram_access(bp);
3712 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3713 if (bp->flash_info->buffered == 0) {
3716 /* Read the whole page into the buffer
3717 * (non-buffer flash only) */
3718 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3719 if (j == (bp->flash_info->page_size - 4)) {
3720 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3722 rc = bnx2_nvram_read_dword(bp,
3723 page_start + j,
3724 &flash_buffer[j],
3725 cmd_flags);
3727 if (rc)
3728 goto nvram_write_end;
3730 cmd_flags = 0;
3734 /* Enable writes to flash interface (unlock write-protect) */
3735 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3736 goto nvram_write_end;
3738 /* Loop to write back the buffer data from page_start to
3739 * data_start */
3741 if (bp->flash_info->buffered == 0) {
3742 /* Erase the page */
3743 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3744 goto nvram_write_end;
3746 /* Re-enable the write again for the actual write */
3747 bnx2_enable_nvram_write(bp);
3749 for (addr = page_start; addr < data_start;
3750 addr += 4, i += 4) {
3752 rc = bnx2_nvram_write_dword(bp, addr,
3753 &flash_buffer[i], cmd_flags);
3755 if (rc != 0)
3756 goto nvram_write_end;
3758 cmd_flags = 0;
3762 /* Loop to write the new data from data_start to data_end */
3763 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3764 if ((addr == page_end - 4) ||
3765 ((bp->flash_info->buffered) &&
3766 (addr == data_end - 4))) {
3768 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3770 rc = bnx2_nvram_write_dword(bp, addr, buf,
3771 cmd_flags);
3773 if (rc != 0)
3774 goto nvram_write_end;
3776 cmd_flags = 0;
3777 buf += 4;
3780 /* Loop to write back the buffer data from data_end
3781 * to page_end */
3782 if (bp->flash_info->buffered == 0) {
3783 for (addr = data_end; addr < page_end;
3784 addr += 4, i += 4) {
3786 if (addr == page_end-4) {
3787 cmd_flags = BNX2_NVM_COMMAND_LAST;
3789 rc = bnx2_nvram_write_dword(bp, addr,
3790 &flash_buffer[i], cmd_flags);
3792 if (rc != 0)
3793 goto nvram_write_end;
3799 /* Disable writes to flash interface (lock write-protect) */
3800 bnx2_disable_nvram_write(bp);
3802 /* Disable access to flash interface */
3803 bnx2_disable_nvram_access(bp);
3804 bnx2_release_nvram_lock(bp);
3806 /* Increment written */
3807 written += data_end - data_start;
3810 nvram_write_end:
3811 kfree(flash_buffer);
3812 kfree(align_buf);
3814 return rc;
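/*
 * Editorial worked example for the alignment logic above: writing
 * buf_size = 10 bytes at offset = 6 gives align_start = 6 & 3 = 2, so
 * offset32 becomes 4 and len32 becomes 12; 12 & 3 == 0, so align_end
 * stays 0.  One dword is pre-read at offset 4 to preserve bytes 4-5,
 * the caller's 10 bytes land at align_buf + 2, and the page loop then
 * rewrites dwords 4..15.
 */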
3817 bnx2_init_remote_phy(struct bnx2 *bp)
3821 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3822 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3823 return;
3825 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3826 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3827 return;
3829 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3830 if (netif_running(bp->dev)) {
3831 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3832 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3833 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3834 val);
3836 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3838 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3839 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3840 bp->phy_port = PORT_FIBRE;
3842 bp->phy_port = PORT_TP;
3847 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3852 /* Wait for the current PCI transaction to complete before
3853 * issuing a reset. */
3854 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3855 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3856 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3857 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3858 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3859 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3860 udelay(5);
3862 /* Wait for the firmware to tell us it is ok to issue a reset. */
3863 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3865 /* Deposit a driver reset signature so the firmware knows that
3866 * this is a soft reset. */
3867 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3868 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3870 /* Do a dummy read to force the chip to complete all current
3871 * transactions before we issue a reset. */
3872 val = REG_RD(bp, BNX2_MISC_ID);
3874 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3875 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3876 REG_RD(bp, BNX2_MISC_COMMAND);
3877 udelay(5);
3879 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3880 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3882 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3884 } else {
3885 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3886 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3887 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3890 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3892 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3893 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3894 current->state = TASK_UNINTERRUPTIBLE;
3895 schedule_timeout(HZ / 50);
3898 /* Reset takes approximately 30 usec */
3899 for (i = 0; i < 10; i++) {
3900 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3901 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3902 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3903 break;
3904 udelay(10);
3907 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3908 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3909 printk(KERN_ERR PFX "Chip reset did not complete\n");
3914 /* Make sure byte swapping is properly configured. */
3915 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3916 if (val != 0x01020304) {
3917 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3921 /* Wait for the firmware to finish its initialization. */
3922 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3923 if (rc)
3924 return rc;
3926 spin_lock_bh(&bp->phy_lock);
3927 bnx2_init_remote_phy(bp);
3928 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3929 bnx2_set_default_remote_link(bp);
3930 spin_unlock_bh(&bp->phy_lock);
3932 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3933 /* Adjust the voltage regulator two steps lower. The default
3934 * value of this register is 0x0000000e. */
3935 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3937 /* Remove bad rbuf memory from the free pool. */
3938 rc = bnx2_alloc_bad_rbuf(bp);
3941 return rc;
3945 bnx2_init_chip(struct bnx2 *bp)
3950 /* Make sure the interrupt is not active. */
3951 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3953 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3954 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3955 #ifdef __BIG_ENDIAN
3956 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3957 #endif
3958 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3959 DMA_READ_CHANS << 12 |
3960 DMA_WRITE_CHANS << 16;
3962 val |= (0x2 << 20) | (1 << 11);
3964 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3965 val |= (1 << 23);
3967 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3968 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3969 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3971 REG_WR(bp, BNX2_DMA_CONFIG, val);
3973 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3974 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3975 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3976 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3979 if (bp->flags & PCIX_FLAG) {
3982 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3984 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3985 val16 & ~PCI_X_CMD_ERO);
3988 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3989 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3990 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3991 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3993 /* Initialize context mapping and zero out the quick contexts. The
3994 * context block must have already been enabled. */
3995 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3996 rc = bnx2_init_5709_context(bp);
3997 if (rc)
3998 return rc;
3999 } else
4000 bnx2_init_context(bp);
4002 if ((rc = bnx2_init_cpus(bp)) != 0)
4003 return rc;
4005 bnx2_init_nvram(bp);
4007 bnx2_set_mac_addr(bp);
4009 val = REG_RD(bp, BNX2_MQ_CONFIG);
4010 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4011 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4012 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4013 val |= BNX2_MQ_CONFIG_HALT_DIS;
4015 REG_WR(bp, BNX2_MQ_CONFIG, val);
4017 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4018 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4019 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4021 val = (BCM_PAGE_BITS - 8) << 24;
4022 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4024 /* Configure page size. */
4025 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4026 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4027 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4028 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4030 val = bp->mac_addr[0] +
4031 (bp->mac_addr[1] << 8) +
4032 (bp->mac_addr[2] << 16) +
4033 bp->mac_addr[3] +
4034 (bp->mac_addr[4] << 8) +
4035 (bp->mac_addr[5] << 16);
4036 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4038 /* Program the MTU. Also include 4 bytes for CRC32. */
4039 val = bp->dev->mtu + ETH_HLEN + 4;
4040 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4041 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4042 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4044 bp->last_status_idx = 0;
4045 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4047 /* Set up how to generate a link change interrupt. */
4048 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4050 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4051 (u64) bp->status_blk_mapping & 0xffffffff);
4052 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4054 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4055 (u64) bp->stats_blk_mapping & 0xffffffff);
4056 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4057 (u64) bp->stats_blk_mapping >> 32);
4059 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4060 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4062 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4063 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4065 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4066 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4068 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4070 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4072 REG_WR(bp, BNX2_HC_COM_TICKS,
4073 (bp->com_ticks_int << 16) | bp->com_ticks);
4075 REG_WR(bp, BNX2_HC_CMD_TICKS,
4076 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4078 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4079 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4080 else
4081 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
4082 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4084 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4085 val = BNX2_HC_CONFIG_COLLECT_STATS;
4086 else
4087 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4088 BNX2_HC_CONFIG_COLLECT_STATS;
4091 if (bp->flags & ONE_SHOT_MSI_FLAG)
4092 val |= BNX2_HC_CONFIG_ONE_SHOT;
4094 REG_WR(bp, BNX2_HC_CONFIG, val);
4096 /* Clear internal stats counters. */
4097 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4099 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4101 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4102 BNX2_PORT_FEATURE_ASF_ENABLED)
4103 bp->flags |= ASF_ENABLE_FLAG;
4105 /* Initialize the receive filter. */
4106 bnx2_set_rx_mode(bp->dev);
4108 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4109 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4110 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4111 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4113 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4114 0);
4116 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
4117 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4121 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4123 return rc;
4127 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4129 u32 val, offset0, offset1, offset2, offset3;
4131 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4132 offset0 = BNX2_L2CTX_TYPE_XI;
4133 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4134 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4135 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4137 offset0 = BNX2_L2CTX_TYPE;
4138 offset1 = BNX2_L2CTX_CMD_TYPE;
4139 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4140 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4142 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4143 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4145 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4146 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4148 val = (u64) bp->tx_desc_mapping >> 32;
4149 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4151 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4152 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4156 bnx2_init_tx_ring(struct bnx2 *bp)
4161 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4163 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4165 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4166 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4168 bp->tx_prod = 0;
4169 bp->tx_cons = 0;
4170 bp->hw_tx_cons = 0;
4171 bp->tx_prod_bseq = 0;
4174 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4175 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4177 bnx2_init_tx_context(bp, cid);
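/*
 * Editorial note: after this setup, producers publish new tx_bd's by
 * writing the ring index and running byte count through the mailboxes
 * initialized above, as bnx2_run_loopback() does:
 *
 *	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
 *	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
 */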
4181 bnx2_init_rx_ring(struct bnx2 *bp)
4185 u16 prod, ring_prod;
4188 /* 8 for CRC and VLAN */
4189 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4191 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4193 ring_prod = prod = bp->rx_prod = 0;
4194 bp->rx_cons = 0;
4195 bp->hw_rx_cons = 0;
4196 bp->rx_prod_bseq = 0;
4198 for (i = 0; i < bp->rx_max_ring; i++) {
4201 rxbd = &bp->rx_desc_ring[i][0];
4202 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4203 rxbd->rx_bd_len = bp->rx_buf_use_size;
4204 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4206 if (i == (bp->rx_max_ring - 1))
4207 j = 0;
4208 else
4209 j = i + 1;
4210 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4211 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4215 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4216 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4218 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4220 val = (u64) bp->rx_desc_mapping[0] >> 32;
4221 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4223 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4224 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4226 for (i = 0; i < bp->rx_ring_size; i++) {
4227 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4228 break;
4230 prod = NEXT_RX_BD(prod);
4231 ring_prod = RX_RING_IDX(prod);
4235 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4237 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4241 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4245 bp->rx_ring_size = size;
4246 num_rings = 1;
4247 while (size > MAX_RX_DESC_CNT) {
4248 size -= MAX_RX_DESC_CNT;
4249 num_rings++;
4251 /* round to next power of 2 */
4252 max = MAX_RX_RINGS;
4253 while ((max & num_rings) == 0)
4254 max >>= 1;
4256 if (num_rings != max)
4257 max <<= 1;
4259 bp->rx_max_ring = max;
4260 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
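/*
 * Editorial worked example, assuming MAX_RX_DESC_CNT = 255,
 * RX_DESC_CNT = 256 and MAX_RX_RINGS = 4: size = 600 leaves
 * num_rings = 3 after the first loop (600 -> 345 -> 90).  The
 * power-of-2 rounding then finds max = 2 (4 & 3 == 0, 2 & 3 != 0)
 * and, since 3 != 2, doubles it to 4, so rx_max_ring = 4 and
 * rx_max_ring_idx = 4 * 256 - 1 = 1023.
 */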
4264 bnx2_free_tx_skbs(struct bnx2 *bp)
4268 if (bp->tx_buf_ring == NULL)
4271 for (i = 0; i < TX_DESC_CNT; ) {
4272 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4273 struct sk_buff *skb = tx_buf->skb;
4274 int j, last;
4276 if (skb == NULL) {
4277 i++;
4278 continue;
4281 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4282 skb_headlen(skb), PCI_DMA_TODEVICE);
4286 last = skb_shinfo(skb)->nr_frags;
4287 for (j = 0; j < last; j++) {
4288 tx_buf = &bp->tx_buf_ring[i + j + 1];
4289 pci_unmap_page(bp->pdev,
4290 pci_unmap_addr(tx_buf, mapping),
4291 skb_shinfo(skb)->frags[j].size,
4292 PCI_DMA_TODEVICE);
4294 dev_kfree_skb(skb);
4295 i += j + 1;
4301 bnx2_free_rx_skbs(struct bnx2 *bp)
4305 if (bp->rx_buf_ring == NULL)
4308 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4309 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4310 struct sk_buff *skb = rx_buf->skb;
4315 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4316 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4318 rx_buf->skb = NULL;
4320 dev_kfree_skb(skb);
4325 bnx2_free_skbs(struct bnx2 *bp)
4327 bnx2_free_tx_skbs(bp);
4328 bnx2_free_rx_skbs(bp);
4332 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4336 rc = bnx2_reset_chip(bp, reset_code);
4337 if (rc)
4338 return rc;
4341 if ((rc = bnx2_init_chip(bp)) != 0)
4344 bnx2_init_tx_ring(bp);
4345 bnx2_init_rx_ring(bp);
4346 return 0;
4350 bnx2_init_nic(struct bnx2 *bp)
4354 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4355 return rc;
4357 spin_lock_bh(&bp->phy_lock);
4358 bnx2_init_phy(bp);
4359 bnx2_set_link(bp);
4360 spin_unlock_bh(&bp->phy_lock);
4362 return 0;
4365 bnx2_test_registers(struct bnx2 *bp)
4369 static const struct {
4370 u16 offset;
4371 u16 flags;
4372 #define BNX2_FL_NOT_5709 1
4373 u32 rw_mask;
4374 u32 ro_mask;
4375 } reg_tbl[] = {
4376 { 0x006c, 0, 0x00000000, 0x0000003f },
4377 { 0x0090, 0, 0xffffffff, 0x00000000 },
4378 { 0x0094, 0, 0x00000000, 0x00000000 },
4380 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4381 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4382 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4383 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4384 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4385 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4386 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4387 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4388 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4390 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4391 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4392 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4393 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4394 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4395 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4397 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4398 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4399 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4401 { 0x1000, 0, 0x00000000, 0x00000001 },
4402 { 0x1004, 0, 0x00000000, 0x000f0001 },
4404 { 0x1408, 0, 0x01c00800, 0x00000000 },
4405 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4406 { 0x14a8, 0, 0x00000000, 0x000001ff },
4407 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4408 { 0x14b0, 0, 0x00000002, 0x00000001 },
4409 { 0x14b8, 0, 0x00000000, 0x00000000 },
4410 { 0x14c0, 0, 0x00000000, 0x00000009 },
4411 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4412 { 0x14cc, 0, 0x00000000, 0x00000001 },
4413 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4415 { 0x1800, 0, 0x00000000, 0x00000001 },
4416 { 0x1804, 0, 0x00000000, 0x00000003 },
4418 { 0x2800, 0, 0x00000000, 0x00000001 },
4419 { 0x2804, 0, 0x00000000, 0x00003f01 },
4420 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4421 { 0x2810, 0, 0xffff0000, 0x00000000 },
4422 { 0x2814, 0, 0xffff0000, 0x00000000 },
4423 { 0x2818, 0, 0xffff0000, 0x00000000 },
4424 { 0x281c, 0, 0xffff0000, 0x00000000 },
4425 { 0x2834, 0, 0xffffffff, 0x00000000 },
4426 { 0x2840, 0, 0x00000000, 0xffffffff },
4427 { 0x2844, 0, 0x00000000, 0xffffffff },
4428 { 0x2848, 0, 0xffffffff, 0x00000000 },
4429 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4431 { 0x2c00, 0, 0x00000000, 0x00000011 },
4432 { 0x2c04, 0, 0x00000000, 0x00030007 },
4434 { 0x3c00, 0, 0x00000000, 0x00000001 },
4435 { 0x3c04, 0, 0x00000000, 0x00070000 },
4436 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4437 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4438 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4439 { 0x3c14, 0, 0x00000000, 0xffffffff },
4440 { 0x3c18, 0, 0x00000000, 0xffffffff },
4441 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4442 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4444 { 0x5004, 0, 0x00000000, 0x0000007f },
4445 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4447 { 0x5c00, 0, 0x00000000, 0x00000001 },
4448 { 0x5c04, 0, 0x00000000, 0x0003000f },
4449 { 0x5c08, 0, 0x00000003, 0x00000000 },
4450 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4451 { 0x5c10, 0, 0x00000000, 0xffffffff },
4452 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4453 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4454 { 0x5c88, 0, 0x00000000, 0x00077373 },
4455 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4457 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4458 { 0x680c, 0, 0xffffffff, 0x00000000 },
4459 { 0x6810, 0, 0xffffffff, 0x00000000 },
4460 { 0x6814, 0, 0xffffffff, 0x00000000 },
4461 { 0x6818, 0, 0xffffffff, 0x00000000 },
4462 { 0x681c, 0, 0xffffffff, 0x00000000 },
4463 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4464 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4465 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4466 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4467 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4468 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4469 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4470 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4471 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4472 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4473 { 0x684c, 0, 0xffffffff, 0x00000000 },
4474 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4475 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4476 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4477 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4478 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4479 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4481 { 0xffff, 0, 0x00000000, 0x00000000 },
4483 int ret = 0;
4484 int i, is_5709 = 0;
4486 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4487 is_5709 = 1;
4489 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4490 u32 offset, rw_mask, ro_mask, save_val, val;
4491 u16 flags = reg_tbl[i].flags;
4493 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4496 offset = (u32) reg_tbl[i].offset;
4497 rw_mask = reg_tbl[i].rw_mask;
4498 ro_mask = reg_tbl[i].ro_mask;
4500 save_val = readl(bp->regview + offset);
4502 writel(0, bp->regview + offset);
4504 val = readl(bp->regview + offset);
4505 if ((val & rw_mask) != 0) {
4506 goto reg_test_err;
4509 if ((val & ro_mask) != (save_val & ro_mask)) {
4510 goto reg_test_err;
4513 writel(0xffffffff, bp->regview + offset);
4515 val = readl(bp->regview + offset);
4516 if ((val & rw_mask) != rw_mask) {
4517 goto reg_test_err;
4520 if ((val & ro_mask) != (save_val & ro_mask)) {
4521 goto reg_test_err;
4524 writel(save_val, bp->regview + offset);
4525 continue;
4527 reg_test_err:
4528 writel(save_val, bp->regview + offset);
4529 ret = -ENODEV;
4532 return ret;
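/*
 * Editorial sketch (hypothetical helper, not in the original file) of
 * the probe bnx2_test_registers() applies to each table entry above:
 * read/write bits (rw_mask) must take both 0 and 1, read-only bits
 * (ro_mask) must never change.
 */
static int bnx2_probe_reg_sketch(void __iomem *reg, u32 rw_mask, u32 ro_mask)
{
	u32 save = readl(reg);
	u32 val;

	writel(0, reg);
	val = readl(reg);
	if ((val & rw_mask) != 0 ||			/* rw bits stuck */
	    (val & ro_mask) != (save & ro_mask))	/* ro bits moved */
		goto bad;

	writel(0xffffffff, reg);
	val = readl(reg);
	if ((val & rw_mask) != rw_mask ||
	    (val & ro_mask) != (save & ro_mask))
		goto bad;

	writel(save, reg);				/* restore */
	return 0;
bad:
	writel(save, reg);
	return -ENODEV;
}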
4536 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4538 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4539 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4542 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4545 for (offset = 0; offset < size; offset += 4) {
4547 REG_WR_IND(bp, start + offset, test_pattern[i]);
4549 if (REG_RD_IND(bp, start + offset) !=
4550 test_pattern[i]) {
4551 return -ENODEV;
4556 return 0;
4559 bnx2_test_memory(struct bnx2 *bp)
4563 static struct mem_entry {
4564 u32 offset;
4565 u32 len;
4566 } mem_tbl_5706[] = {
4567 { 0x60000, 0x4000 },
4568 { 0xa0000, 0x3000 },
4569 { 0xe0000, 0x4000 },
4570 { 0x120000, 0x4000 },
4571 { 0x1a0000, 0x4000 },
4572 { 0x160000, 0x4000 },
4573 { 0xffffffff, 0 },
4574 },
4575 mem_tbl_5709[] = {
4576 { 0x60000, 0x4000 },
4577 { 0xa0000, 0x3000 },
4578 { 0xe0000, 0x4000 },
4579 { 0x120000, 0x4000 },
4580 { 0x1a0000, 0x4000 },
4581 { 0xffffffff, 0 },
4583 struct mem_entry *mem_tbl;
4585 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4586 mem_tbl = mem_tbl_5709;
4588 mem_tbl = mem_tbl_5706;
4590 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4591 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4592 mem_tbl[i].len)) != 0) {
4593 return ret;
4597 return ret;
4600 #define BNX2_MAC_LOOPBACK 0
4601 #define BNX2_PHY_LOOPBACK 1
4604 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4606 unsigned int pkt_size, num_pkts, i;
4607 struct sk_buff *skb, *rx_skb;
4608 unsigned char *packet;
4609 u16 rx_start_idx, rx_idx;
4610 struct tx_bd *txbd;
4611 dma_addr_t map;
4612 struct sw_bd *rx_buf;
4613 struct l2_fhdr *rx_hdr;
4614 int ret = -ENODEV;
4616 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4617 bp->loopback = MAC_LOOPBACK;
4618 bnx2_set_mac_loopback(bp);
4620 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4621 bp->loopback = PHY_LOOPBACK;
4622 bnx2_set_phy_loopback(bp);
4624 else
4625 return -EINVAL;
4627 pkt_size = 1514;
4628 skb = netdev_alloc_skb(bp->dev, pkt_size);
4629 if (!skb)
4630 return -ENOMEM;
4631 packet = skb_put(skb, pkt_size);
4632 memcpy(packet, bp->dev->dev_addr, 6);
4633 memset(packet + 6, 0x0, 8);
4634 for (i = 14; i < pkt_size; i++)
4635 packet[i] = (unsigned char) (i & 0xff);
4637 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4640 REG_WR(bp, BNX2_HC_COMMAND,
4641 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4643 REG_RD(bp, BNX2_HC_COMMAND);
4646 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4649 num_pkts = 0;
4650 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4652 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4653 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4654 txbd->tx_bd_mss_nbytes = pkt_size;
4655 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4657 num_pkts++;
4658 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4659 bp->tx_prod_bseq += pkt_size;
4661 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4662 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4666 REG_WR(bp, BNX2_HC_COMMAND,
4667 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4669 REG_RD(bp, BNX2_HC_COMMAND);
4673 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4676 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4677 goto loopback_test_done;
4680 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4681 if (rx_idx != rx_start_idx + num_pkts) {
4682 goto loopback_test_done;
4685 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4686 rx_skb = rx_buf->skb;
4688 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4689 skb_reserve(rx_skb, bp->rx_offset);
4691 pci_dma_sync_single_for_cpu(bp->pdev,
4692 pci_unmap_addr(rx_buf, mapping),
4693 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4695 if (rx_hdr->l2_fhdr_status &
4696 (L2_FHDR_ERRORS_BAD_CRC |
4697 L2_FHDR_ERRORS_PHY_DECODE |
4698 L2_FHDR_ERRORS_ALIGNMENT |
4699 L2_FHDR_ERRORS_TOO_SHORT |
4700 L2_FHDR_ERRORS_GIANT_FRAME)) {
4702 goto loopback_test_done;
4705 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4706 goto loopback_test_done;
4709 for (i = 14; i < pkt_size; i++) {
4710 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4711 goto loopback_test_done;
4715 ret = 0;
4717 loopback_test_done:
4718 bp->loopback = 0;
4719 return ret;
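/*
 * Editorial note: the frame verified above was built as <6-byte
 * destination MAC = dev->dev_addr><8 zero bytes><payload of (i & 0xff)
 * from byte 14 onward>, so truncation, misdelivery and payload
 * corruption all show up in the checks above.
 */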
4722 #define BNX2_MAC_LOOPBACK_FAILED 1
4723 #define BNX2_PHY_LOOPBACK_FAILED 2
4724 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4725 BNX2_PHY_LOOPBACK_FAILED)
4728 bnx2_test_loopback(struct bnx2 *bp)
4730 int rc = 0;
4732 if (!netif_running(bp->dev))
4733 return BNX2_LOOPBACK_FAILED;
4735 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4736 spin_lock_bh(&bp->phy_lock);
4737 bnx2_init_phy(bp);
4738 spin_unlock_bh(&bp->phy_lock);
4739 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4740 rc |= BNX2_MAC_LOOPBACK_FAILED;
4741 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4742 rc |= BNX2_PHY_LOOPBACK_FAILED;
4744 return rc;
4746 #define NVRAM_SIZE 0x200
4747 #define CRC32_RESIDUAL 0xdebb20e3
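/*
 * Editorial note: 0xdebb20e3 is the standard little-endian CRC-32
 * residual -- running the CRC across a block that ends with the stored
 * (complemented) CRC of the preceding bytes always yields this
 * constant, i.e. ether_crc_le(0x100, block) == CRC32_RESIDUAL for a
 * well-formed 0x100-byte block.  That lets each half of the region
 * below be verified without parsing out its checksum field.
 */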
4750 bnx2_test_nvram(struct bnx2 *bp)
4752 u32 buf[NVRAM_SIZE / 4];
4753 u8 *data = (u8 *) buf;
4754 int rc = 0;
4755 u32 magic, csum;
4757 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4758 goto test_nvram_done;
4760 magic = be32_to_cpu(buf[0]);
4761 if (magic != 0x669955aa) {
4762 rc = -ENODEV;
4763 goto test_nvram_done;
4766 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4767 goto test_nvram_done;
4769 csum = ether_crc_le(0x100, data);
4770 if (csum != CRC32_RESIDUAL) {
4771 rc = -ENODEV;
4772 goto test_nvram_done;
4775 csum = ether_crc_le(0x100, data + 0x100);
4776 if (csum != CRC32_RESIDUAL) {
4777 rc = -ENODEV;
4780 test_nvram_done:
4781 return rc;
4785 bnx2_test_link(struct bnx2 *bp)
4787 u32 bmsr;
4789 spin_lock_bh(&bp->phy_lock);
4790 bnx2_enable_bmsr1(bp);
4791 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4792 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4793 bnx2_disable_bmsr1(bp);
4794 spin_unlock_bh(&bp->phy_lock);
4796 if (bmsr & BMSR_LSTATUS) {
4797 return 0;
4799 return -ENODEV;
4803 bnx2_test_intr(struct bnx2 *bp)
4805 int i;
4806 u16 status_idx;
4808 if (!netif_running(bp->dev))
4809 return -ENODEV;
4811 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4813 /* This register is not touched during run-time. */
4814 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4815 REG_RD(bp, BNX2_HC_COMMAND);
4817 for (i = 0; i < 10; i++) {
4818 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4819 status_idx) {
4820 break;
4823 msleep_interruptible(10);
4825 if (i < 10)
4826 return 0;
4828 return -ENODEV;
4833 bnx2_5706_serdes_timer(struct bnx2 *bp)
4835 spin_lock(&bp->phy_lock);
4836 if (bp->serdes_an_pending)
4837 bp->serdes_an_pending--;
4838 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4841 bp->current_interval = bp->timer_interval;
4843 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4845 if (bmcr & BMCR_ANENABLE) {
4848 bnx2_write_phy(bp, 0x1c, 0x7c00);
4849 bnx2_read_phy(bp, 0x1c, &phy1);
4851 bnx2_write_phy(bp, 0x17, 0x0f01);
4852 bnx2_read_phy(bp, 0x15, &phy2);
4853 bnx2_write_phy(bp, 0x17, 0x0f01);
4854 bnx2_read_phy(bp, 0x15, &phy2);
4856 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4857 !(phy2 & 0x20)) { /* no CONFIG */
4859 bmcr &= ~BMCR_ANENABLE;
4860 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4861 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4862 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4866 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4867 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4870 bnx2_write_phy(bp, 0x17, 0x0f01);
4871 bnx2_read_phy(bp, 0x15, &phy2);
4872 if (phy2 & 0x20) {
4873 u32 bmcr;
4875 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4876 bmcr |= BMCR_ANENABLE;
4877 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4879 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4882 bp->current_interval = bp->timer_interval;
4884 spin_unlock(&bp->phy_lock);
4888 bnx2_5708_serdes_timer(struct bnx2 *bp)
4890 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4891 return;
4893 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4894 bp->serdes_an_pending = 0;
4895 return;
4898 spin_lock(&bp->phy_lock);
4899 if (bp->serdes_an_pending)
4900 bp->serdes_an_pending--;
4901 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4904 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4905 if (bmcr & BMCR_ANENABLE) {
4906 bnx2_enable_forced_2g5(bp);
4907 bp->current_interval = SERDES_FORCED_TIMEOUT;
4909 bnx2_disable_forced_2g5(bp);
4910 bp->serdes_an_pending = 2;
4911 bp->current_interval = bp->timer_interval;
4915 bp->current_interval = bp->timer_interval;
4917 spin_unlock(&bp->phy_lock);
4921 bnx2_timer(unsigned long data)
4923 struct bnx2 *bp = (struct bnx2 *) data;
4926 if (!netif_running(bp->dev))
4927 return;
4929 if (atomic_read(&bp->intr_sem) != 0)
4930 goto bnx2_restart_timer;
4932 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4933 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4935 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4937 /* work around occasional corrupted counters */
4938 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4939 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4940 BNX2_HC_COMMAND_STATS_NOW);
4942 if (bp->phy_flags & PHY_SERDES_FLAG) {
4943 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4944 bnx2_5706_serdes_timer(bp);
4946 bnx2_5708_serdes_timer(bp);
4949 bnx2_restart_timer:
4950 mod_timer(&bp->timer, jiffies + bp->current_interval);
4954 bnx2_request_irq(struct bnx2 *bp)
4956 struct net_device *dev = bp->dev;
4959 if (bp->flags & USING_MSI_FLAG) {
4960 irq_handler_t fn = bnx2_msi;
4962 if (bp->flags & ONE_SHOT_MSI_FLAG)
4963 fn = bnx2_msi_1shot;
4965 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4967 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4968 IRQF_SHARED, dev->name, dev);
4970 return rc;
4973 bnx2_free_irq(struct bnx2 *bp)
4975 struct net_device *dev = bp->dev;
4977 if (bp->flags & USING_MSI_FLAG) {
4978 free_irq(bp->pdev->irq, dev);
4979 pci_disable_msi(bp->pdev);
4980 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4982 free_irq(bp->pdev->irq, dev);
4985 /* Called with rtnl_lock */
4987 bnx2_open(struct net_device *dev)
4989 struct bnx2 *bp = netdev_priv(dev);
4992 netif_carrier_off(dev);
4994 bnx2_set_power_state(bp, PCI_D0);
4995 bnx2_disable_int(bp);
4997 rc = bnx2_alloc_mem(bp);
4998 if (rc)
4999 return rc;
5001 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5002 if (pci_enable_msi(bp->pdev) == 0) {
5003 bp->flags |= USING_MSI_FLAG;
5004 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5005 bp->flags |= ONE_SHOT_MSI_FLAG;
5008 rc = bnx2_request_irq(bp);
5010 if (rc) {
5011 bnx2_free_mem(bp);
5012 return rc;
5015 rc = bnx2_init_nic(bp);
5017 if (rc) {
5018 bnx2_free_irq(bp);
5019 bnx2_free_mem(bp);
5020 return rc;
5024 mod_timer(&bp->timer, jiffies + bp->current_interval);
5026 atomic_set(&bp->intr_sem, 0);
5028 bnx2_enable_int(bp);
5030 if (bp->flags & USING_MSI_FLAG) {
5031 /* Test MSI to make sure it is working
5032 * If MSI test fails, go back to INTx mode
5034 if (bnx2_test_intr(bp) != 0) {
5035 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5036 " using MSI, switching to INTx mode. Please"
5037 " report this failure to the PCI maintainer"
5038 " and include system chipset information.\n",
5041 bnx2_disable_int(bp);
5042 bnx2_free_irq(bp);
5044 rc = bnx2_init_nic(bp);
5046 if (!rc)
5047 rc = bnx2_request_irq(bp);
5049 if (rc) {
5050 bnx2_free_mem(bp);
5052 del_timer_sync(&bp->timer);
5053 return rc;
5055 bnx2_enable_int(bp);
5058 if (bp->flags & USING_MSI_FLAG) {
5059 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5062 netif_start_queue(dev);
5064 return 0;
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
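/* Descriptor accounting in bnx2_start_xmit(), as implied by the code
 * above: an skb consumes one tx_bd for the linear header plus one per
 * page fragment, which is why the queue is stopped when fewer than
 * nr_frags + 1 descriptors remain.  For example, a TSO skb with a linear
 * header and two page fragments uses three BDs:
 *
 *	BD0: header,  TX_BD_FLAGS_START
 *	BD1: frag 0
 *	BD2: frag 1,  TX_BD_FLAGS_END
 *
 * TX_RING_IDX() masks the free-running 16-bit producer index into the
 * ring, and the new producer index and running byte count are then
 * posted to the chip through the tx_bidx/tx_bseq mailbox registers.
 */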
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
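/* The reset_code chosen above tells the bootcode what to prepare for:
 * UNLOAD_LNK_DN when wake-on-LAN is unsupported (the link may drop),
 * SUSPEND_WOL when the user enabled WoL through bnx2_set_wol(), and
 * SUSPEND_NO_WOL otherwise.  bnx2_suspend() makes the same three-way
 * choice before entering D3.
 */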
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
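/* The hardware keeps 64-bit counters as {hi, lo} pairs of 32-bit words.
 * On a 64-bit kernel GET_NET_STATS64 reassembles them; for example, with
 * ctr_hi = 0x1 and ctr_lo = 0x2345 the result is
 * (0x1UL << 32) + 0x2345 = 0x100002345.  On 32-bit kernels only the low
 * word is reported, so long-running counters can wrap in struct
 * net_device_stats.
 */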
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
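/* fw_ver packs the bootcode revision one byte per field, and the code
 * above turns the top three bytes into a dotted string.  For example,
 * bp->fw_ver == 0x01090600 yields "1.9.6".  Each byte is converted with
 * + '0', so this formatting assumes single-digit version components.
 */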
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
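/* reg_boundaries[] is consumed as consecutive {end-of-range,
 * start-of-next-range} pairs: the dump loop copies registers from the
 * current offset until it reaches reg_boundaries[i + 1], then jumps to
 * reg_boundaries[i + 2].  So registers 0x0000-0x0094 are read, the hole
 * up to 0x0400 is left as the zeros from the memset(), reading resumes
 * at 0x0400, and so on until the 32 KB dump buffer is covered.
 */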
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
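/* STATS_OFFSET32() converts a field's byte offset within struct
 * statistics_block into an index into the block viewed as a u32 array;
 * e.g. a field at byte offset 0x40 becomes index 0x10.  The _hi entries
 * below therefore index the high word of a 64-bit counter, with the low
 * word at the next index, which is how bnx2_get_ethtool_stats() reads
 * them back.
 */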
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
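/* Each entry above gives the width of the corresponding
 * bnx2_stats_str_arr counter: 8 means a 64-bit {hi, lo} pair, 4 a single
 * 32-bit word, and 0 a counter that is skipped (reported as zero)
 * because of the errata noted above.  Both tables zero entry 1
 * (rx_error_bytes); the 5706/5708-A0 table also zeroes entry 11
 * (tx_carrier_errors), matching the tx_carrier_errors workaround in
 * bnx2_get_stats().
 */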
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
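/* The loop above runs data * 2 half-second phases, alternating between
 * LED-override-only and all-LEDs-forced-on, so an identify request
 * (e.g. "ethtool -p") blinks the port LEDs roughly once per second for
 * about data seconds before the saved BNX2_MISC_CFG value is restored.
 */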
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_hw_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
}
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;
	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= PCIE_FLAG;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & PCIE_FLAG))
		bnx2_get_pci_speed(bp);
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->phy_port = PORT_FIBRE;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & PCIE_FLAG) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & PCIX_FLAG)
			s += sprintf(s, "-X");
		if (bp->flags & PCI_32BIT_FLAG)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);