/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.11"
#define DRV_MODULE_RELDATE	"June 4, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32
bnx2_tx_avail(struct bnx2 *bp)
{
	u16 diff;

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
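/* Registers that fall outside the directly mapped window are reached
 * indirectly: the target offset is first written to the PCICFG
 * window-address register, then the window data register is read or
 * written.  indirect_lock serializes the two-step sequence so
 * concurrent callers cannot interleave address and data accesses.
 */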
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
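/* Context memory writes differ by chip: on the 5709 the write goes
 * through the CTX_CTX_DATA/CTX_CTX_CTRL register pair and the driver
 * polls for the WRITE_REQ bit to clear, while older chips take a
 * simple address/data register pair.  Both paths are covered by
 * indirect_lock.
 */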
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
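/* PHY registers are accessed through the EMAC MDIO interface.  When
 * the hardware is auto-polling the PHY, polling is paused around the
 * manual access and re-enabled afterwards; the MDIO_COMM register is
 * then polled until the START_BUSY bit clears or the access times out.
 */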
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
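/* intr_sem acts as a gate: the interrupt handlers and
 * bnx2_netif_start() check it before doing any work, so bumping it
 * here and then synchronizing with the IRQ guarantees no interrupt
 * processing is in flight once bnx2_disable_int_sync() returns.
 */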
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
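/* Ring memory layout: the hardware descriptor rings (tx_bd/rx_bd)
 * live in coherent DMA memory, while the parallel software rings of
 * sw_bd entries tracking skbs and DMA mappings are plain kernel
 * memory.  The status and statistics blocks share one coherent
 * allocation.
 */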
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
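/* The bootcode firmware keeps its own view of link state in shared
 * memory; the driver mirrors the resolved speed/duplex and
 * autonegotiation results into BNX2_LINK_STATUS so management
 * firmware stays in sync with what the driver negotiated.
 */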
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	} else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
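/* Forcing 2.5G is chip-specific: the 5709 uses the SERDES_DIG_MISC1
 * force bits behind a block-address window, while the 5708 sets a
 * vendor bit in its BMCR.  The two helpers below hide that
 * difference from the PHY setup code.
 */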
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
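/* Map the requested flow-control mode onto the advertisement bits for
 * the current medium: 1000BASE-X pause bits for SerDes links, the
 * standard MII PAUSE_CAP/PAUSE_ASYM bits for copper.
 */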
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int bnx2_fw_sync(struct bnx2 *, u32, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static void
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			break;
	}
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
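/* Loopback setup used by the ethtool self tests: MAC loopback wraps
 * frames inside the EMAC, while PHY loopback forces the PHY into
 * loopback through the BMCR and waits for the link to settle.
 */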
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
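/* Driver/firmware handshake: a sequence number is folded into the
 * message written to the DRV_MB mailbox, and the firmware echoes it
 * back through FW_MB when it has consumed the request.  A timeout is
 * reported back to the firmware so it can account for the
 * unserviced request.
 */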
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
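/* Early 5706 parts can have defective internal RX buffer memory.
 * The workaround below asks the firmware to hand out every free mbuf,
 * keeps the ones flagged good, and frees only those back, leaving the
 * bad blocks permanently allocated and out of circulation.
 */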
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
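/* Attention bits follow an acknowledge protocol: an event is pending
 * when a bit in status_attn_bits differs from its shadow in
 * status_attn_bits_ack, and acknowledging it means writing the bit to
 * the SET or CLEAR command register to bring the two back in line.
 */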
static int
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
{
	struct status_block *sblk = bp->status_blk;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
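/* When an rx buffer cannot be replaced (allocation failure or rx
 * error), the existing skb is recycled: its DMA mapping and buffer
 * descriptor address are moved from the consumer slot to the producer
 * slot so the hardware can refill the same buffer in place.
 */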
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2348 bnx2_rx_int(struct bnx2 *bp, int budget)
2350 struct status_block *sblk = bp->status_blk;
2351 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2352 struct l2_fhdr *rx_hdr;
2355 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2356 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2359 sw_cons = bp->rx_cons;
2360 sw_prod = bp->rx_prod;
2362 /* Memory barrier necessary as speculative reads of the rx
2363 * buffer can be ahead of the index in the status block
2366 while (sw_cons != hw_cons) {
2369 struct sw_bd *rx_buf;
2370 struct sk_buff *skb;
2371 dma_addr_t dma_addr;
2373 sw_ring_cons = RX_RING_IDX(sw_cons);
2374 sw_ring_prod = RX_RING_IDX(sw_prod);
2376 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2381 dma_addr = pci_unmap_addr(rx_buf, mapping);
2383 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2384 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2386 rx_hdr = (struct l2_fhdr *) skb->data;
2387 len = rx_hdr->l2_fhdr_pkt_len - 4;
2389 if ((status = rx_hdr->l2_fhdr_status) &
2390 (L2_FHDR_ERRORS_BAD_CRC |
2391 L2_FHDR_ERRORS_PHY_DECODE |
2392 L2_FHDR_ERRORS_ALIGNMENT |
2393 L2_FHDR_ERRORS_TOO_SHORT |
2394 L2_FHDR_ERRORS_GIANT_FRAME)) {
2399 /* Since we don't have a jumbo ring, copy small packets
2402 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2403 struct sk_buff *new_skb;
2405 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2406 if (new_skb == NULL)
2410 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2411 new_skb->data, len + 2);
2412 skb_reserve(new_skb, 2);
2413 skb_put(new_skb, len);
2415 bnx2_reuse_rx_skb(bp, skb,
2416 sw_ring_cons, sw_ring_prod);
2420 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2421 pci_unmap_single(bp->pdev, dma_addr,
2422 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2424 skb_reserve(skb, bp->rx_offset);
2429 bnx2_reuse_rx_skb(bp, skb,
2430 sw_ring_cons, sw_ring_prod);
2434 skb->protocol = eth_type_trans(skb, bp->dev);
2436 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2437 (ntohs(skb->protocol) != 0x8100)) {
2444 skb->ip_summed = CHECKSUM_NONE;
2446 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2447 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2449 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2450 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2451 skb->ip_summed = CHECKSUM_UNNECESSARY;
2455 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2456 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2457 rx_hdr->l2_fhdr_vlan_tag);
2461 netif_receive_skb(skb);
2463 bp->dev->last_rx = jiffies;
2467 sw_cons = NEXT_RX_BD(sw_cons);
2468 sw_prod = NEXT_RX_BD(sw_prod);
2470 if (rx_pkt == budget)
2473 /* Refresh hw_cons to see if there is new work */
2474 if (sw_cons == hw_cons) {
2475 hw_cons = bp->hw_rx_cons =
2476 sblk->status_rx_quick_consumer_index0;
2477 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2482 bp->rx_cons = sw_cons;
2483 bp->rx_prod = sw_prod;
2485 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2487 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
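/* The two mailbox writes above publish the host's refill progress to
 * the chip: BNX2_L2CTX_HOST_BDIDX carries the 16-bit ring producer
 * index and BNX2_L2CTX_HOST_BSEQ a running byte sequence number
 * (rx_prod_bseq is advanced by rx_buf_use_size per posted buffer in
 * bnx2_reuse_rx_skb() above).  They are written together so the
 * hardware's index and byte views of the ring stay consistent.
 */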
2495 /* MSI ISR - The only difference between this and the INTx ISR
2496 * is that the MSI interrupt is always serviced.
2499 bnx2_msi(int irq, void *dev_instance)
2501 struct net_device *dev = dev_instance;
2502 struct bnx2 *bp = netdev_priv(dev);
2504 prefetch(bp->status_blk);
2505 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2506 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2507 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2509 /* Return here if interrupt is disabled. */
2510 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2513 netif_rx_schedule(dev);
2519 bnx2_msi_1shot(int irq, void *dev_instance)
2521 struct net_device *dev = dev_instance;
2522 struct bnx2 *bp = netdev_priv(dev);
2524 prefetch(bp->status_blk);
2526 /* Return here if interrupt is disabled. */
2527 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2530 netif_rx_schedule(dev);
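/* Unlike bnx2_msi() above, no BNX2_PCICFG_INT_ACK_CMD masking write is
 * needed here: with BNX2_HC_CONFIG_ONE_SHOT set (see bnx2_init_chip()),
 * the host coalescing block masks itself after raising each MSI, and
 * the interrupt is re-armed when bnx2_poll() acks with a new status
 * index.
 */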
2536 bnx2_interrupt(int irq, void *dev_instance)
2538 struct net_device *dev = dev_instance;
2539 struct bnx2 *bp = netdev_priv(dev);
2541 /* When using INTx, it is possible for the interrupt to arrive
2542 * at the CPU before the status block posted prior to the
2543 * interrupt. Reading a register will flush the status block.
2544 * When using MSI, the MSI message will always complete after
2545 * the status block write.
2547 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2548 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2549 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2552 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2553 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2554 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2556 /* Return here if interrupt is shared and is disabled. */
2557 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2560 netif_rx_schedule(dev);
2565 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2566 STATUS_ATTN_BITS_TIMER_ABORT)
2569 bnx2_has_work(struct bnx2 *bp)
2571 struct status_block *sblk = bp->status_blk;
2573 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2574 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2577 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2578 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2585 bnx2_poll(struct net_device *dev, int *budget)
2587 struct bnx2 *bp = netdev_priv(dev);
2588 struct status_block *sblk = bp->status_blk;
2589 u32 status_attn_bits = sblk->status_attn_bits;
2590 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2592 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2593 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2597 /* This is needed to take care of transient status
2598 * during link changes.
2600 REG_WR(bp, BNX2_HC_COMMAND,
2601 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2602 REG_RD(bp, BNX2_HC_COMMAND);
2605 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2608 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2609 int orig_budget = *budget;
2612 if (orig_budget > dev->quota)
2613 orig_budget = dev->quota;
2615 work_done = bnx2_rx_int(bp, orig_budget);
2616 *budget -= work_done;
2617 dev->quota -= work_done;
2620 bp->last_status_idx = bp->status_blk->status_idx;
2623 if (!bnx2_has_work(bp)) {
2624 netif_rx_complete(dev);
2625 if (likely(bp->flags & USING_MSI_FLAG)) {
2626 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2627 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2628 bp->last_status_idx);
2631 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2632 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2633 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2634 bp->last_status_idx);
2636 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2637 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2638 bp->last_status_idx);
2645 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2646 * from set_multicast.
2649 bnx2_set_rx_mode(struct net_device *dev)
2651 struct bnx2 *bp = netdev_priv(dev);
2652 u32 rx_mode, sort_mode;
2655 spin_lock_bh(&bp->phy_lock);
2657 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2658 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2659 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2660 #ifdef BCM_VLAN
2661 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2662 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2663 #else
2664 if (!(bp->flags & ASF_ENABLE_FLAG))
2665 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2666 #endif
2667 if (dev->flags & IFF_PROMISC) {
2668 /* Promiscuous mode. */
2669 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2670 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2671 BNX2_RPM_SORT_USER0_PROM_VLAN;
2673 else if (dev->flags & IFF_ALLMULTI) {
2674 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2675 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2678 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2681 /* Accept one or more multicast(s). */
2682 struct dev_mc_list *mclist;
2683 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2688 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2690 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2691 i++, mclist = mclist->next) {
2693 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2694 bit = crc & 0xff;
2695 regidx = (bit & 0xe0) >> 5;
2696 bit &= 0x1f;
2697 mc_filter[regidx] |= (1 << bit);
2700 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2701 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2705 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
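/* Worked example of the hash above: for a multicast address whose
 * little-endian CRC32 has low byte 0x9a, regidx = (0x9a & 0xe0) >> 5
 * = 4 and bit = 0x9a & 0x1f = 26, so bit 26 of
 * BNX2_EMAC_MULTICAST_HASH4 gets set.  The low CRC byte thus selects
 * one of 8 registers x 32 bits = 256 hash buckets.
 */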
2708 if (rx_mode != bp->rx_mode) {
2709 bp->rx_mode = rx_mode;
2710 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2713 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2714 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2715 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2717 spin_unlock_bh(&bp->phy_lock);
2720 #define FW_BUF_SIZE 0x8000
2723 bnx2_gunzip_init(struct bnx2 *bp)
2725 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2728 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2731 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2732 if (bp->strm->workspace == NULL)
2742 vfree(bp->gunzip_buf);
2743 bp->gunzip_buf = NULL;
2746 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2747 "uncompression.\n", bp->dev->name);
2752 bnx2_gunzip_end(struct bnx2 *bp)
2754 kfree(bp->strm->workspace);
2759 if (bp->gunzip_buf) {
2760 vfree(bp->gunzip_buf);
2761 bp->gunzip_buf = NULL;
2766 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2770 /* check gzip header */
2771 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2774 n = 10;
2777 if (zbuf[3] & FNAME)
2778 while ((zbuf[n++] != 0) && (n < len));
2780 bp->strm->next_in = zbuf + n;
2781 bp->strm->avail_in = len - n;
2782 bp->strm->next_out = bp->gunzip_buf;
2783 bp->strm->avail_out = FW_BUF_SIZE;
2785 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
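/* zlib_inflateInit2() is given a negative windowBits (-MAX_WBITS) to
 * select a raw DEFLATE decode: the 10-byte gzip header (plus any
 * NUL-terminated FNAME field) was already skipped by hand above, and
 * the trailing gzip CRC32/ISIZE words are simply never consumed.
 * Z_FINISH below then inflates the whole image in one call, which is
 * safe because the output is bounded by the FW_BUF_SIZE buffer.
 */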
2789 rc = zlib_inflate(bp->strm, Z_FINISH);
2791 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2792 *outbuf = bp->gunzip_buf;
2794 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2795 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2796 bp->dev->name, bp->strm->msg);
2798 zlib_inflateEnd(bp->strm);
2800 if (rc == Z_STREAM_END)
2807 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2814 for (i = 0; i < rv2p_code_len; i += 8) {
2815 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2817 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2820 if (rv2p_proc == RV2P_PROC1) {
2821 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2822 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2825 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2826 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2830 /* Reset the processor, un-stall is done later. */
2831 if (rv2p_proc == RV2P_PROC1) {
2832 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2835 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2840 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2847 val = REG_RD_IND(bp, cpu_reg->mode);
2848 val |= cpu_reg->mode_value_halt;
2849 REG_WR_IND(bp, cpu_reg->mode, val);
2850 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2852 /* Load the Text area. */
2853 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2858 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2868 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2869 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2873 /* Load the Data area. */
2874 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2878 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2879 REG_WR_IND(bp, offset, fw->data[j]);
2883 /* Load the SBSS area. */
2884 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2888 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2889 REG_WR_IND(bp, offset, fw->sbss[j]);
2893 /* Load the BSS area. */
2894 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2898 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2899 REG_WR_IND(bp, offset, fw->bss[j]);
2903 /* Load the Read-Only area. */
2904 offset = cpu_reg->spad_base +
2905 (fw->rodata_addr - cpu_reg->mips_view_base);
2909 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2910 REG_WR_IND(bp, offset, fw->rodata[j]);
2914 /* Clear the pre-fetch instruction. */
2915 REG_WR_IND(bp, cpu_reg->inst, 0);
2916 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2918 /* Start the CPU. */
2919 val = REG_RD_IND(bp, cpu_reg->mode);
2920 val &= ~cpu_reg->mode_value_halt;
2921 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2922 REG_WR_IND(bp, cpu_reg->mode, val);
2928 bnx2_init_cpus(struct bnx2 *bp)
2930 struct cpu_reg cpu_reg;
2936 if ((rc = bnx2_gunzip_init(bp)) != 0)
2939 /* Initialize the RV2P processor. */
2940 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2945 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2947 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2952 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2954 /* Initialize the RX Processor. */
2955 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2956 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2957 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2958 cpu_reg.state = BNX2_RXP_CPU_STATE;
2959 cpu_reg.state_value_clear = 0xffffff;
2960 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2961 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2962 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2963 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2964 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2965 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2966 cpu_reg.mips_view_base = 0x8000000;
2968 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2969 fw = &bnx2_rxp_fw_09;
2971 fw = &bnx2_rxp_fw_06;
2973 rc = load_cpu_fw(bp, &cpu_reg, fw);
2977 /* Initialize the TX Processor. */
2978 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2979 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2980 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2981 cpu_reg.state = BNX2_TXP_CPU_STATE;
2982 cpu_reg.state_value_clear = 0xffffff;
2983 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2984 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2985 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2986 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2987 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2988 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2989 cpu_reg.mips_view_base = 0x8000000;
2991 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2992 fw = &bnx2_txp_fw_09;
2994 fw = &bnx2_txp_fw_06;
2996 rc = load_cpu_fw(bp, &cpu_reg, fw);
3000 /* Initialize the TX Patch-up Processor. */
3001 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3002 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3003 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3004 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3005 cpu_reg.state_value_clear = 0xffffff;
3006 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3007 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3008 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3009 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3010 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3011 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3012 cpu_reg.mips_view_base = 0x8000000;
3014 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3015 fw = &bnx2_tpat_fw_09;
3017 fw = &bnx2_tpat_fw_06;
3019 rc = load_cpu_fw(bp, &cpu_reg, fw);
3023 /* Initialize the Completion Processor. */
3024 cpu_reg.mode = BNX2_COM_CPU_MODE;
3025 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3026 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3027 cpu_reg.state = BNX2_COM_CPU_STATE;
3028 cpu_reg.state_value_clear = 0xffffff;
3029 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3030 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3031 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3032 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3033 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3034 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3035 cpu_reg.mips_view_base = 0x8000000;
3037 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3038 fw = &bnx2_com_fw_09;
3040 fw = &bnx2_com_fw_06;
3042 rc = load_cpu_fw(bp, &cpu_reg, fw);
3046 /* Initialize the Command Processor. */
3047 cpu_reg.mode = BNX2_CP_CPU_MODE;
3048 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3049 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3050 cpu_reg.state = BNX2_CP_CPU_STATE;
3051 cpu_reg.state_value_clear = 0xffffff;
3052 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3053 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3054 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3055 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3056 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3057 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3058 cpu_reg.mips_view_base = 0x8000000;
3060 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3061 fw = &bnx2_cp_fw_09;
3063 rc = load_cpu_fw(bp, &cpu_reg, fw);
3068 bnx2_gunzip_end(bp);
3073 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3077 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3083 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3084 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3085 PCI_PM_CTRL_PME_STATUS);
3087 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3088 /* delay required during transition out of D3hot */
3091 val = REG_RD(bp, BNX2_EMAC_MODE);
3092 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3093 val &= ~BNX2_EMAC_MODE_MPKT;
3094 REG_WR(bp, BNX2_EMAC_MODE, val);
3096 val = REG_RD(bp, BNX2_RPM_CONFIG);
3097 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3098 REG_WR(bp, BNX2_RPM_CONFIG, val);
3109 autoneg = bp->autoneg;
3110 advertising = bp->advertising;
3112 bp->autoneg = AUTONEG_SPEED;
3113 bp->advertising = ADVERTISED_10baseT_Half |
3114 ADVERTISED_10baseT_Full |
3115 ADVERTISED_100baseT_Half |
3116 ADVERTISED_100baseT_Full |
3119 bnx2_setup_copper_phy(bp);
3121 bp->autoneg = autoneg;
3122 bp->advertising = advertising;
3124 bnx2_set_mac_addr(bp);
3126 val = REG_RD(bp, BNX2_EMAC_MODE);
3128 /* Enable port mode. */
3129 val &= ~BNX2_EMAC_MODE_PORT;
3130 val |= BNX2_EMAC_MODE_PORT_MII |
3131 BNX2_EMAC_MODE_MPKT_RCVD |
3132 BNX2_EMAC_MODE_ACPI_RCVD |
3133 BNX2_EMAC_MODE_MPKT;
3135 REG_WR(bp, BNX2_EMAC_MODE, val);
3137 /* receive all multicast */
3138 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3139 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3142 REG_WR(bp, BNX2_EMAC_RX_MODE,
3143 BNX2_EMAC_RX_MODE_SORT_MODE);
3145 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3146 BNX2_RPM_SORT_USER0_MC_EN;
3147 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3148 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3149 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3150 BNX2_RPM_SORT_USER0_ENA);
3152 /* Need to enable EMAC and RPM for WOL. */
3153 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3154 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3155 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3156 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3158 val = REG_RD(bp, BNX2_RPM_CONFIG);
3159 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3160 REG_WR(bp, BNX2_RPM_CONFIG, val);
3162 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3165 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3168 if (!(bp->flags & NO_WOL_FLAG))
3169 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3171 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3172 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3173 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3182 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3184 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3187 /* No more memory access after this point until
3188 * device is brought back to D0.
3200 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3205 /* Request access to the flash interface. */
3206 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3207 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3208 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3209 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3215 if (j >= NVRAM_TIMEOUT_COUNT)
3222 bnx2_release_nvram_lock(struct bnx2 *bp)
3227 /* Relinquish nvram interface. */
3228 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3230 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3231 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3232 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3238 if (j >= NVRAM_TIMEOUT_COUNT)
3246 bnx2_enable_nvram_write(struct bnx2 *bp)
3250 val = REG_RD(bp, BNX2_MISC_CFG);
3251 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3253 if (!bp->flash_info->buffered) {
3256 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3257 REG_WR(bp, BNX2_NVM_COMMAND,
3258 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3260 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3263 val = REG_RD(bp, BNX2_NVM_COMMAND);
3264 if (val & BNX2_NVM_COMMAND_DONE)
3268 if (j >= NVRAM_TIMEOUT_COUNT)
3275 bnx2_disable_nvram_write(struct bnx2 *bp)
3279 val = REG_RD(bp, BNX2_MISC_CFG);
3280 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3285 bnx2_enable_nvram_access(struct bnx2 *bp)
3289 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290 /* Enable both bits, even on read. */
3291 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3292 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3296 bnx2_disable_nvram_access(struct bnx2 *bp)
3300 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3301 /* Disable both bits, even after read. */
3302 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3303 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3304 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3308 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3313 if (bp->flash_info->buffered)
3314 /* Buffered flash, no erase needed */
3317 /* Build an erase command */
3318 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3319 BNX2_NVM_COMMAND_DOIT;
3321 /* Need to clear DONE bit separately. */
3322 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3324 /* Address of the NVRAM sector to erase. */
3325 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3327 /* Issue an erase command. */
3328 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3330 /* Wait for completion. */
3331 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3336 val = REG_RD(bp, BNX2_NVM_COMMAND);
3337 if (val & BNX2_NVM_COMMAND_DONE)
3341 if (j >= NVRAM_TIMEOUT_COUNT)
3348 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3353 /* Build the command word. */
3354 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3356 /* Translate the offset for buffered flash addressing. */
3357 if (bp->flash_info->buffered) {
3358 offset = ((offset / bp->flash_info->page_size) <<
3359 bp->flash_info->page_bits) +
3360 (offset % bp->flash_info->page_size);
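/* Illustrative translation, assuming page_size = 264 and page_bits = 9
 * (the values used for the buffered AT45DB-style parts in flash_table):
 * linear offset 1000 becomes ((1000 / 264) << 9) + (1000 % 264)
 * = (3 << 9) + 208 = 0x6d0, i.e. the page index lands in the high bits
 * and the byte-within-page in the low bits, as the device expects.
 */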
3363 /* Need to clear DONE bit separately. */
3364 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3366 /* Address of the NVRAM to read from. */
3367 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3369 /* Issue a read command. */
3370 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3372 /* Wait for completion. */
3373 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3378 val = REG_RD(bp, BNX2_NVM_COMMAND);
3379 if (val & BNX2_NVM_COMMAND_DONE) {
3380 val = REG_RD(bp, BNX2_NVM_READ);
3382 val = be32_to_cpu(val);
3383 memcpy(ret_val, &val, 4);
3387 if (j >= NVRAM_TIMEOUT_COUNT)
3395 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3400 /* Build the command word. */
3401 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3403 /* Translate the offset for buffered flash addressing. */
3404 if (bp->flash_info->buffered) {
3405 offset = ((offset / bp->flash_info->page_size) <<
3406 bp->flash_info->page_bits) +
3407 (offset % bp->flash_info->page_size);
3410 /* Need to clear DONE bit separately. */
3411 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3413 memcpy(&val32, val, 4);
3414 val32 = cpu_to_be32(val32);
3416 /* Write the data. */
3417 REG_WR(bp, BNX2_NVM_WRITE, val32);
3419 /* Address of the NVRAM to write to. */
3420 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3422 /* Issue the write command. */
3423 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3425 /* Wait for completion. */
3426 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3429 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3432 if (j >= NVRAM_TIMEOUT_COUNT)
3439 bnx2_init_nvram(struct bnx2 *bp)
3442 int j, entry_count, rc;
3443 struct flash_spec *flash;
3445 /* Determine the selected interface. */
3446 val = REG_RD(bp, BNX2_NVM_CFG1);
3448 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3451 if (val & 0x40000000) {
3453 /* Flash interface has been reconfigured */
3454 for (j = 0, flash = &flash_table[0]; j < entry_count;
3456 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3457 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3458 bp->flash_info = flash;
3465 /* Not yet reconfigured */
3467 if (val & (1 << 23))
3468 mask = FLASH_BACKUP_STRAP_MASK;
3470 mask = FLASH_STRAP_MASK;
3472 for (j = 0, flash = &flash_table[0]; j < entry_count;
3475 if ((val & mask) == (flash->strapping & mask)) {
3476 bp->flash_info = flash;
3478 /* Request access to the flash interface. */
3479 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3482 /* Enable access to flash interface */
3483 bnx2_enable_nvram_access(bp);
3485 /* Reconfigure the flash interface */
3486 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3487 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3488 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3489 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3491 /* Disable access to flash interface */
3492 bnx2_disable_nvram_access(bp);
3493 bnx2_release_nvram_lock(bp);
3498 } /* if (val & 0x40000000) */
3500 if (j == entry_count) {
3501 bp->flash_info = NULL;
3502 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3506 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3507 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3509 bp->flash_size = val;
3511 bp->flash_size = bp->flash_info->total_size;
3517 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3521 u32 cmd_flags, offset32, len32, extra;
3526 /* Request access to the flash interface. */
3527 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3530 /* Enable access to flash interface */
3531 bnx2_enable_nvram_access(bp);
3544 pre_len = 4 - (offset & 3);
3546 if (pre_len >= len32) {
3548 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3549 BNX2_NVM_COMMAND_LAST;
3552 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3555 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3560 memcpy(ret_buf, buf + (offset & 3), pre_len);
3567 extra = 4 - (len32 & 3);
3568 len32 = (len32 + 4) & ~3;
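/* Worked example (illustrative): a 10-byte read at offset 5 yields
 * pre_len = 3 (bytes 5-7 of the dword at offset 4), one aligned middle
 * dword at offset 8, and a final dword at offset 12 of which only
 * 4 - extra = 3 bytes (12-14) are copied out.  extra = 1 rounds len32
 * up to whole dwords without leaking the pad byte into the caller's
 * buffer.
 */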
3575 cmd_flags = BNX2_NVM_COMMAND_LAST;
3577 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3578 BNX2_NVM_COMMAND_LAST;
3580 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3582 memcpy(ret_buf, buf, 4 - extra);
3584 else if (len32 > 0) {
3587 /* Read the first word. */
3591 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3593 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3595 /* Advance to the next dword. */
3600 while (len32 > 4 && rc == 0) {
3601 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3603 /* Advance to the next dword. */
3612 cmd_flags = BNX2_NVM_COMMAND_LAST;
3613 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3615 memcpy(ret_buf, buf, 4 - extra);
3618 /* Disable access to flash interface */
3619 bnx2_disable_nvram_access(bp);
3621 bnx2_release_nvram_lock(bp);
3627 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3630 u32 written, offset32, len32;
3631 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3633 int align_start, align_end;
3638 align_start = align_end = 0;
3640 if ((align_start = (offset32 & 3))) {
3642 len32 += align_start;
3645 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3650 align_end = 4 - (len32 & 3);
3652 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3656 if (align_start || align_end) {
3657 align_buf = kmalloc(len32, GFP_KERNEL);
3658 if (align_buf == NULL)
3661 memcpy(align_buf, start, 4);
3664 memcpy(align_buf + len32 - 4, end, 4);
3666 memcpy(align_buf + align_start, data_buf, buf_size);
3670 if (bp->flash_info->buffered == 0) {
3671 flash_buffer = kmalloc(264, GFP_KERNEL);
3672 if (flash_buffer == NULL) {
3674 goto nvram_write_end;
3679 while ((written < len32) && (rc == 0)) {
3680 u32 page_start, page_end, data_start, data_end;
3681 u32 addr, cmd_flags;
3684 /* Find the page_start addr */
3685 page_start = offset32 + written;
3686 page_start -= (page_start % bp->flash_info->page_size);
3687 /* Find the page_end addr */
3688 page_end = page_start + bp->flash_info->page_size;
3689 /* Find the data_start addr */
3690 data_start = (written == 0) ? offset32 : page_start;
3691 /* Find the data_end addr */
3692 data_end = (page_end > offset32 + len32) ?
3693 (offset32 + len32) : page_end;
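/* Worked example (illustrative, page_size = 256): writing 16 bytes at
 * offset 0x10a gives page_start = 0x100, page_end = 0x200,
 * data_start = 0x10a and data_end = 0x11a on the first (and only)
 * pass.  For non-buffered flash, the untouched bytes 0x100-0x109 and
 * 0x11a-0x1ff are first read into flash_buffer and written back after
 * the erase, so the rest of the page is preserved.
 */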
3695 /* Request access to the flash interface. */
3696 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3697 goto nvram_write_end;
3699 /* Enable access to flash interface */
3700 bnx2_enable_nvram_access(bp);
3702 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3703 if (bp->flash_info->buffered == 0) {
3706 /* Read the whole page into the buffer
3707 * (non-buffered flash only) */
3708 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3709 if (j == (bp->flash_info->page_size - 4)) {
3710 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3712 rc = bnx2_nvram_read_dword(bp,
3718 goto nvram_write_end;
3724 /* Enable writes to flash interface (unlock write-protect) */
3725 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3726 goto nvram_write_end;
3728 /* Loop to write back the buffer data from page_start to
3731 if (bp->flash_info->buffered == 0) {
3732 /* Erase the page */
3733 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3734 goto nvram_write_end;
3736 /* Re-enable the write again for the actual write */
3737 bnx2_enable_nvram_write(bp);
3739 for (addr = page_start; addr < data_start;
3740 addr += 4, i += 4) {
3742 rc = bnx2_nvram_write_dword(bp, addr,
3743 &flash_buffer[i], cmd_flags);
3746 goto nvram_write_end;
3752 /* Loop to write the new data from data_start to data_end */
3753 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3754 if ((addr == page_end - 4) ||
3755 ((bp->flash_info->buffered) &&
3756 (addr == data_end - 4))) {
3758 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3760 rc = bnx2_nvram_write_dword(bp, addr, buf,
3764 goto nvram_write_end;
3770 /* Loop to write back the buffer data from data_end
3772 if (bp->flash_info->buffered == 0) {
3773 for (addr = data_end; addr < page_end;
3774 addr += 4, i += 4) {
3776 if (addr == page_end-4) {
3777 cmd_flags = BNX2_NVM_COMMAND_LAST;
3779 rc = bnx2_nvram_write_dword(bp, addr,
3780 &flash_buffer[i], cmd_flags);
3783 goto nvram_write_end;
3789 /* Disable writes to flash interface (lock write-protect) */
3790 bnx2_disable_nvram_write(bp);
3792 /* Disable access to flash interface */
3793 bnx2_disable_nvram_access(bp);
3794 bnx2_release_nvram_lock(bp);
3796 /* Increment written */
3797 written += data_end - data_start;
3801 kfree(flash_buffer);
3807 bnx2_init_remote_phy(struct bnx2 *bp)
3811 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3812 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3815 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3816 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3819 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3820 if (netif_running(bp->dev)) {
3821 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3822 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3823 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3826 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3828 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3829 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3830 bp->phy_port = PORT_FIBRE;
3832 bp->phy_port = PORT_TP;
3837 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3842 /* Wait for the current PCI transaction to complete before
3843 * issuing a reset. */
3844 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3845 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3846 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3847 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3848 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3849 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3852 /* Wait for the firmware to tell us it is ok to issue a reset. */
3853 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3855 /* Deposit a driver reset signature so the firmware knows that
3856 * this is a soft reset. */
3857 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3858 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3860 /* Do a dummy read to force the chip to complete all current transactions
3861 * before we issue a reset. */
3862 val = REG_RD(bp, BNX2_MISC_ID);
3864 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3865 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3866 REG_RD(bp, BNX2_MISC_COMMAND);
3869 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3870 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3872 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3875 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3876 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3877 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3880 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3882 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3883 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3884 current->state = TASK_UNINTERRUPTIBLE;
3885 schedule_timeout(HZ / 50);
3888 /* Reset takes approximately 30 usec */
3889 for (i = 0; i < 10; i++) {
3890 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3891 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3892 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3897 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3898 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3899 printk(KERN_ERR PFX "Chip reset did not complete\n");
3904 /* Make sure byte swapping is properly configured. */
3905 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3906 if (val != 0x01020304) {
3907 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3911 /* Wait for the firmware to finish its initialization. */
3912 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3916 spin_lock_bh(&bp->phy_lock);
3917 bnx2_init_remote_phy(bp);
3918 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3919 bnx2_set_default_remote_link(bp);
3920 spin_unlock_bh(&bp->phy_lock);
3922 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3923 /* Adjust the voltage regulator to two steps lower. The default
3924 * of this register is 0x0000000e. */
3925 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3927 /* Remove bad rbuf memory from the free pool. */
3928 rc = bnx2_alloc_bad_rbuf(bp);
3935 bnx2_init_chip(struct bnx2 *bp)
3940 /* Make sure the interrupt is not active. */
3941 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3943 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3944 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3946 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3948 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3949 DMA_READ_CHANS << 12 |
3950 DMA_WRITE_CHANS << 16;
3952 val |= (0x2 << 20) | (1 << 11);
3954 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3957 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3958 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3959 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3961 REG_WR(bp, BNX2_DMA_CONFIG, val);
3963 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3964 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3965 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3966 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3969 if (bp->flags & PCIX_FLAG) {
3972 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3974 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3975 val16 & ~PCI_X_CMD_ERO);
3978 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3979 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3980 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3981 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3983 /* Initialize context mapping and zero out the quick contexts. The
3984 * context block must have already been enabled. */
3985 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3986 rc = bnx2_init_5709_context(bp);
3990 bnx2_init_context(bp);
3992 if ((rc = bnx2_init_cpus(bp)) != 0)
3995 bnx2_init_nvram(bp);
3997 bnx2_set_mac_addr(bp);
3999 val = REG_RD(bp, BNX2_MQ_CONFIG);
4000 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4001 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4002 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4003 val |= BNX2_MQ_CONFIG_HALT_DIS;
4005 REG_WR(bp, BNX2_MQ_CONFIG, val);
4007 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4008 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4009 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4011 val = (BCM_PAGE_BITS - 8) << 24;
4012 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4014 /* Configure page size. */
4015 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4016 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4017 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4018 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4020 val = bp->mac_addr[0] +
4021 (bp->mac_addr[1] << 8) +
4022 (bp->mac_addr[2] << 16) +
4024 (bp->mac_addr[4] << 8) +
4025 (bp->mac_addr[5] << 16);
4026 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4028 /* Program the MTU. Also include 4 bytes for CRC32. */
4029 val = bp->dev->mtu + ETH_HLEN + 4;
4030 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4031 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4032 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4034 bp->last_status_idx = 0;
4035 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4037 /* Set up how to generate a link change interrupt. */
4038 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4040 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4041 (u64) bp->status_blk_mapping & 0xffffffff);
4042 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4044 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4045 (u64) bp->stats_blk_mapping & 0xffffffff);
4046 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4047 (u64) bp->stats_blk_mapping >> 32);
4049 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4050 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4052 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4053 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4055 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4056 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4058 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4060 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4062 REG_WR(bp, BNX2_HC_COM_TICKS,
4063 (bp->com_ticks_int << 16) | bp->com_ticks);
4065 REG_WR(bp, BNX2_HC_CMD_TICKS,
4066 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4068 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4069 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4071 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
4072 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4074 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4075 val = BNX2_HC_CONFIG_COLLECT_STATS;
4077 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4078 BNX2_HC_CONFIG_COLLECT_STATS;
4081 if (bp->flags & ONE_SHOT_MSI_FLAG)
4082 val |= BNX2_HC_CONFIG_ONE_SHOT;
4084 REG_WR(bp, BNX2_HC_CONFIG, val);
4086 /* Clear internal stats counters. */
4087 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4089 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4091 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4092 BNX2_PORT_FEATURE_ASF_ENABLED)
4093 bp->flags |= ASF_ENABLE_FLAG;
4095 /* Initialize the receive filter. */
4096 bnx2_set_rx_mode(bp->dev);
4098 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4099 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4100 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4101 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4103 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4106 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
4107 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4111 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4117 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4119 u32 val, offset0, offset1, offset2, offset3;
4121 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4122 offset0 = BNX2_L2CTX_TYPE_XI;
4123 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4124 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4125 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4127 offset0 = BNX2_L2CTX_TYPE;
4128 offset1 = BNX2_L2CTX_CMD_TYPE;
4129 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4130 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4132 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4133 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4135 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4136 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4138 val = (u64) bp->tx_desc_mapping >> 32;
4139 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4141 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4142 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4146 bnx2_init_tx_ring(struct bnx2 *bp)
4151 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4153 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4155 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4156 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
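/* The entry at index MAX_TX_DESC_CNT is not a packet descriptor: it is
 * set up here as a chain BD whose address points back to the base of
 * the ring, so the hardware wraps around automatically.  This is also
 * why the NEXT_TX_BD() macro steps over that slot when advancing the
 * producer index.
 */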
4161 bp->tx_prod_bseq = 0;
4164 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4165 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4167 bnx2_init_tx_context(bp, cid);
4171 bnx2_init_rx_ring(struct bnx2 *bp)
4175 u16 prod, ring_prod;
4178 /* 8 for CRC and VLAN */
4179 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4181 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4183 ring_prod = prod = bp->rx_prod = 0;
4186 bp->rx_prod_bseq = 0;
4188 for (i = 0; i < bp->rx_max_ring; i++) {
4191 rxbd = &bp->rx_desc_ring[i][0];
4192 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4193 rxbd->rx_bd_len = bp->rx_buf_use_size;
4194 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4196 if (i == (bp->rx_max_ring - 1))
4200 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4201 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4205 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4206 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4208 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4210 val = (u64) bp->rx_desc_mapping[0] >> 32;
4211 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4213 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4214 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4216 for (i = 0; i < bp->rx_ring_size; i++) {
4217 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4220 prod = NEXT_RX_BD(prod);
4221 ring_prod = RX_RING_IDX(prod);
4225 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4227 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4231 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4235 bp->rx_ring_size = size;
4237 while (size > MAX_RX_DESC_CNT) {
4238 size -= MAX_RX_DESC_CNT;
4241 /* round to next power of 2 */
4243 while ((max & num_rings) == 0)
4246 if (num_rings != max)
4249 bp->rx_max_ring = max;
4250 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
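/* Worked example (illustrative, assuming the usual MAX_RX_RINGS cap of
 * 4 from bnx2.h): a requested size of 600 buffers needs num_rings = 3
 * pages of MAX_RX_DESC_CNT usable descriptors; 3 is not a power of 2,
 * so the rounding above bumps rx_max_ring up to 4.  A size of 300
 * needs 2 pages and stays at 2.
 */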
4254 bnx2_free_tx_skbs(struct bnx2 *bp)
4258 if (bp->tx_buf_ring == NULL)
4261 for (i = 0; i < TX_DESC_CNT; ) {
4262 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4263 struct sk_buff *skb = tx_buf->skb;
4271 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4272 skb_headlen(skb), PCI_DMA_TODEVICE);
4276 last = skb_shinfo(skb)->nr_frags;
4277 for (j = 0; j < last; j++) {
4278 tx_buf = &bp->tx_buf_ring[i + j + 1];
4279 pci_unmap_page(bp->pdev,
4280 pci_unmap_addr(tx_buf, mapping),
4281 skb_shinfo(skb)->frags[j].size,
4291 bnx2_free_rx_skbs(struct bnx2 *bp)
4295 if (bp->rx_buf_ring == NULL)
4298 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4299 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4300 struct sk_buff *skb = rx_buf->skb;
4305 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4306 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4315 bnx2_free_skbs(struct bnx2 *bp)
4317 bnx2_free_tx_skbs(bp);
4318 bnx2_free_rx_skbs(bp);
4322 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4326 rc = bnx2_reset_chip(bp, reset_code);
4331 if ((rc = bnx2_init_chip(bp)) != 0)
4334 bnx2_init_tx_ring(bp);
4335 bnx2_init_rx_ring(bp);
4340 bnx2_init_nic(struct bnx2 *bp)
4344 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4347 spin_lock_bh(&bp->phy_lock);
4350 spin_unlock_bh(&bp->phy_lock);
4355 bnx2_test_registers(struct bnx2 *bp)
4359 static const struct {
4362 #define BNX2_FL_NOT_5709 1
4366 { 0x006c, 0, 0x00000000, 0x0000003f },
4367 { 0x0090, 0, 0xffffffff, 0x00000000 },
4368 { 0x0094, 0, 0x00000000, 0x00000000 },
4370 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4371 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4372 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4373 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4374 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4375 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4376 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4377 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4378 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4380 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4381 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4382 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4383 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4384 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4385 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4387 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4388 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4389 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4391 { 0x1000, 0, 0x00000000, 0x00000001 },
4392 { 0x1004, 0, 0x00000000, 0x000f0001 },
4394 { 0x1408, 0, 0x01c00800, 0x00000000 },
4395 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4396 { 0x14a8, 0, 0x00000000, 0x000001ff },
4397 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4398 { 0x14b0, 0, 0x00000002, 0x00000001 },
4399 { 0x14b8, 0, 0x00000000, 0x00000000 },
4400 { 0x14c0, 0, 0x00000000, 0x00000009 },
4401 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4402 { 0x14cc, 0, 0x00000000, 0x00000001 },
4403 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4405 { 0x1800, 0, 0x00000000, 0x00000001 },
4406 { 0x1804, 0, 0x00000000, 0x00000003 },
4408 { 0x2800, 0, 0x00000000, 0x00000001 },
4409 { 0x2804, 0, 0x00000000, 0x00003f01 },
4410 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4411 { 0x2810, 0, 0xffff0000, 0x00000000 },
4412 { 0x2814, 0, 0xffff0000, 0x00000000 },
4413 { 0x2818, 0, 0xffff0000, 0x00000000 },
4414 { 0x281c, 0, 0xffff0000, 0x00000000 },
4415 { 0x2834, 0, 0xffffffff, 0x00000000 },
4416 { 0x2840, 0, 0x00000000, 0xffffffff },
4417 { 0x2844, 0, 0x00000000, 0xffffffff },
4418 { 0x2848, 0, 0xffffffff, 0x00000000 },
4419 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4421 { 0x2c00, 0, 0x00000000, 0x00000011 },
4422 { 0x2c04, 0, 0x00000000, 0x00030007 },
4424 { 0x3c00, 0, 0x00000000, 0x00000001 },
4425 { 0x3c04, 0, 0x00000000, 0x00070000 },
4426 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4427 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4428 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4429 { 0x3c14, 0, 0x00000000, 0xffffffff },
4430 { 0x3c18, 0, 0x00000000, 0xffffffff },
4431 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4432 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4434 { 0x5004, 0, 0x00000000, 0x0000007f },
4435 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4437 { 0x5c00, 0, 0x00000000, 0x00000001 },
4438 { 0x5c04, 0, 0x00000000, 0x0003000f },
4439 { 0x5c08, 0, 0x00000003, 0x00000000 },
4440 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4441 { 0x5c10, 0, 0x00000000, 0xffffffff },
4442 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4443 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4444 { 0x5c88, 0, 0x00000000, 0x00077373 },
4445 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4447 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4448 { 0x680c, 0, 0xffffffff, 0x00000000 },
4449 { 0x6810, 0, 0xffffffff, 0x00000000 },
4450 { 0x6814, 0, 0xffffffff, 0x00000000 },
4451 { 0x6818, 0, 0xffffffff, 0x00000000 },
4452 { 0x681c, 0, 0xffffffff, 0x00000000 },
4453 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4454 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4455 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4456 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4457 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4458 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4459 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4460 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4461 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4462 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4463 { 0x684c, 0, 0xffffffff, 0x00000000 },
4464 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4465 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4466 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4467 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4468 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4469 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4471 { 0xffff, 0, 0x00000000, 0x00000000 },
4476 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4479 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4480 u32 offset, rw_mask, ro_mask, save_val, val;
4481 u16 flags = reg_tbl[i].flags;
4483 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4486 offset = (u32) reg_tbl[i].offset;
4487 rw_mask = reg_tbl[i].rw_mask;
4488 ro_mask = reg_tbl[i].ro_mask;
4490 save_val = readl(bp->regview + offset);
4492 writel(0, bp->regview + offset);
4494 val = readl(bp->regview + offset);
4495 if ((val & rw_mask) != 0) {
4499 if ((val & ro_mask) != (save_val & ro_mask)) {
4503 writel(0xffffffff, bp->regview + offset);
4505 val = readl(bp->regview + offset);
4506 if ((val & rw_mask) != rw_mask) {
4510 if ((val & ro_mask) != (save_val & ro_mask)) {
4514 writel(save_val, bp->regview + offset);
4518 writel(save_val, bp->regview + offset);
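/* Each register is probed twice: after writing 0, every read/write bit
 * (rw_mask) must read back clear; after writing 0xffffffff, every
 * rw_mask bit must read back set; and in both cases the read-only bits
 * (ro_mask) must still hold the value saved in save_val, which is
 * restored before moving on to the next entry.
 */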
4526 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4528 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4529 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4532 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4535 for (offset = 0; offset < size; offset += 4) {
4537 REG_WR_IND(bp, start + offset, test_pattern[i]);
4539 if (REG_RD_IND(bp, start + offset) !=
4549 bnx2_test_memory(struct bnx2 *bp)
4553 static struct mem_entry {
4556 } mem_tbl_5706[] = {
4557 { 0x60000, 0x4000 },
4558 { 0xa0000, 0x3000 },
4559 { 0xe0000, 0x4000 },
4560 { 0x120000, 0x4000 },
4561 { 0x1a0000, 0x4000 },
4562 { 0x160000, 0x4000 },
4566 { 0x60000, 0x4000 },
4567 { 0xa0000, 0x3000 },
4568 { 0xe0000, 0x4000 },
4569 { 0x120000, 0x4000 },
4570 { 0x1a0000, 0x4000 },
4573 struct mem_entry *mem_tbl;
4575 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4576 mem_tbl = mem_tbl_5709;
4578 mem_tbl = mem_tbl_5706;
4580 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4581 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4582 mem_tbl[i].len)) != 0) {
4590 #define BNX2_MAC_LOOPBACK 0
4591 #define BNX2_PHY_LOOPBACK 1
4594 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4596 unsigned int pkt_size, num_pkts, i;
4597 struct sk_buff *skb, *rx_skb;
4598 unsigned char *packet;
4599 u16 rx_start_idx, rx_idx;
4602 struct sw_bd *rx_buf;
4603 struct l2_fhdr *rx_hdr;
4606 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4607 bp->loopback = MAC_LOOPBACK;
4608 bnx2_set_mac_loopback(bp);
4610 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4611 bp->loopback = PHY_LOOPBACK;
4612 bnx2_set_phy_loopback(bp);
4618 skb = netdev_alloc_skb(bp->dev, pkt_size);
4621 packet = skb_put(skb, pkt_size);
4622 memcpy(packet, bp->dev->dev_addr, 6);
4623 memset(packet + 6, 0x0, 8);
4624 for (i = 14; i < pkt_size; i++)
4625 packet[i] = (unsigned char) (i & 0xff);
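/* The test frame built above is: 6 bytes of our own MAC address as the
 * destination (so the looped-back frame passes the RX filter), 8 bytes
 * of zeros covering the source address and EtherType, then an
 * incrementing 0x0e, 0x0f, ... pattern from byte 14 onward, which the
 * receive-side loop near the end of this function verifies
 * byte-for-byte.
 */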
4627 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4630 REG_WR(bp, BNX2_HC_COMMAND,
4631 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4633 REG_RD(bp, BNX2_HC_COMMAND);
4636 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4640 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4642 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4643 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4644 txbd->tx_bd_mss_nbytes = pkt_size;
4645 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4648 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4649 bp->tx_prod_bseq += pkt_size;
4651 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4652 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4656 REG_WR(bp, BNX2_HC_COMMAND,
4657 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4659 REG_RD(bp, BNX2_HC_COMMAND);
4663 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4666 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4667 goto loopback_test_done;
4670 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4671 if (rx_idx != rx_start_idx + num_pkts) {
4672 goto loopback_test_done;
4675 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4676 rx_skb = rx_buf->skb;
4678 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4679 skb_reserve(rx_skb, bp->rx_offset);
4681 pci_dma_sync_single_for_cpu(bp->pdev,
4682 pci_unmap_addr(rx_buf, mapping),
4683 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4685 if (rx_hdr->l2_fhdr_status &
4686 (L2_FHDR_ERRORS_BAD_CRC |
4687 L2_FHDR_ERRORS_PHY_DECODE |
4688 L2_FHDR_ERRORS_ALIGNMENT |
4689 L2_FHDR_ERRORS_TOO_SHORT |
4690 L2_FHDR_ERRORS_GIANT_FRAME)) {
4692 goto loopback_test_done;
4695 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4696 goto loopback_test_done;
4699 for (i = 14; i < pkt_size; i++) {
4700 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4701 goto loopback_test_done;
4712 #define BNX2_MAC_LOOPBACK_FAILED 1
4713 #define BNX2_PHY_LOOPBACK_FAILED 2
4714 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4715 BNX2_PHY_LOOPBACK_FAILED)
4718 bnx2_test_loopback(struct bnx2 *bp)
4722 if (!netif_running(bp->dev))
4723 return BNX2_LOOPBACK_FAILED;
4725 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4726 spin_lock_bh(&bp->phy_lock);
4728 spin_unlock_bh(&bp->phy_lock);
4729 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4730 rc |= BNX2_MAC_LOOPBACK_FAILED;
4731 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4732 rc |= BNX2_PHY_LOOPBACK_FAILED;
4736 #define NVRAM_SIZE 0x200
4737 #define CRC32_RESIDUAL 0xdebb20e3
4740 bnx2_test_nvram(struct bnx2 *bp)
4742 u32 buf[NVRAM_SIZE / 4];
4743 u8 *data = (u8 *) buf;
4747 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4748 goto test_nvram_done;
4750 magic = be32_to_cpu(buf[0]);
4751 if (magic != 0x669955aa) {
4753 goto test_nvram_done;
4756 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4757 goto test_nvram_done;
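/* CRC32 residual check: each 0x100-byte half of the block read above
 * carries its own CRC32 appended at the end, and running
 * ether_crc_le() over an intact data-plus-stored-CRC block always
 * yields the well-known CRC-32 residual 0xdebb20e3, so no recomputed
 * checksum needs to be compared explicitly.
 */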
4759 csum = ether_crc_le(0x100, data);
4760 if (csum != CRC32_RESIDUAL) {
4762 goto test_nvram_done;
4765 csum = ether_crc_le(0x100, data + 0x100);
4766 if (csum != CRC32_RESIDUAL) {
4775 bnx2_test_link(struct bnx2 *bp)
4779 spin_lock_bh(&bp->phy_lock);
4780 bnx2_enable_bmsr1(bp);
4781 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4782 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4783 bnx2_disable_bmsr1(bp);
4784 spin_unlock_bh(&bp->phy_lock);
4786 if (bmsr & BMSR_LSTATUS) {
4793 bnx2_test_intr(struct bnx2 *bp)
4798 if (!netif_running(bp->dev))
4801 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4803 /* This register is not touched during run-time. */
4804 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4805 REG_RD(bp, BNX2_HC_COMMAND);
4807 for (i = 0; i < 10; i++) {
4808 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4814 msleep_interruptible(10);
4823 bnx2_5706_serdes_timer(struct bnx2 *bp)
4825 spin_lock(&bp->phy_lock);
4826 if (bp->serdes_an_pending)
4827 bp->serdes_an_pending--;
4828 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4831 bp->current_interval = bp->timer_interval;
4833 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4835 if (bmcr & BMCR_ANENABLE) {
4838 bnx2_write_phy(bp, 0x1c, 0x7c00);
4839 bnx2_read_phy(bp, 0x1c, &phy1);
4841 bnx2_write_phy(bp, 0x17, 0x0f01);
4842 bnx2_read_phy(bp, 0x15, &phy2);
4843 bnx2_write_phy(bp, 0x17, 0x0f01);
4844 bnx2_read_phy(bp, 0x15, &phy2);
4846 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4847 !(phy2 & 0x20)) { /* no CONFIG */
4849 bmcr &= ~BMCR_ANENABLE;
4850 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4851 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4852 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
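/* Parallel detection: signal is present but the link partner is not
 * autonegotiating, so autoneg is turned off and 1000/full is forced to
 * match.  PHY_PARALLEL_DETECT_FLAG records this state so the
 * (bp->link_up) branch below can re-enable autoneg once the partner
 * starts sending config words again.
 */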
4856 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4857 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4860 bnx2_write_phy(bp, 0x17, 0x0f01);
4861 bnx2_read_phy(bp, 0x15, &phy2);
4865 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4866 bmcr |= BMCR_ANENABLE;
4867 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4869 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4872 bp->current_interval = bp->timer_interval;
4874 spin_unlock(&bp->phy_lock);
4878 bnx2_5708_serdes_timer(struct bnx2 *bp)
4880 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4883 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4884 bp->serdes_an_pending = 0;
4888 spin_lock(&bp->phy_lock);
4889 if (bp->serdes_an_pending)
4890 bp->serdes_an_pending--;
4891 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4894 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4895 if (bmcr & BMCR_ANENABLE) {
4896 bnx2_enable_forced_2g5(bp);
4897 bp->current_interval = SERDES_FORCED_TIMEOUT;
4899 bnx2_disable_forced_2g5(bp);
4900 bp->serdes_an_pending = 2;
4901 bp->current_interval = bp->timer_interval;
4905 bp->current_interval = bp->timer_interval;
4907 spin_unlock(&bp->phy_lock);
4911 bnx2_timer(unsigned long data)
4913 struct bnx2 *bp = (struct bnx2 *) data;
4916 if (!netif_running(bp->dev))
4919 if (atomic_read(&bp->intr_sem) != 0)
4920 goto bnx2_restart_timer;
4922 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4923 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4925 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4927 /* work around occasionally corrupted counters */
4928 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4929 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4930 BNX2_HC_COMMAND_STATS_NOW);
4932 if (bp->phy_flags & PHY_SERDES_FLAG) {
4933 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4934 bnx2_5706_serdes_timer(bp);
4936 bnx2_5708_serdes_timer(bp);
4940 mod_timer(&bp->timer, jiffies + bp->current_interval);
4944 bnx2_request_irq(struct bnx2 *bp)
4946 struct net_device *dev = bp->dev;
4949 if (bp->flags & USING_MSI_FLAG) {
4950 irq_handler_t fn = bnx2_msi;
4952 if (bp->flags & ONE_SHOT_MSI_FLAG)
4953 fn = bnx2_msi_1shot;
4955 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4957 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4958 IRQF_SHARED, dev->name, dev);
4963 bnx2_free_irq(struct bnx2 *bp)
4965 struct net_device *dev = bp->dev;
4967 if (bp->flags & USING_MSI_FLAG) {
4968 free_irq(bp->pdev->irq, dev);
4969 pci_disable_msi(bp->pdev);
4970 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4972 free_irq(bp->pdev->irq, dev);
4975 /* Called with rtnl_lock */
4977 bnx2_open(struct net_device *dev)
4979 struct bnx2 *bp = netdev_priv(dev);
4982 netif_carrier_off(dev);
4984 bnx2_set_power_state(bp, PCI_D0);
4985 bnx2_disable_int(bp);
4987 rc = bnx2_alloc_mem(bp);
4991 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4992 if (pci_enable_msi(bp->pdev) == 0) {
4993 bp->flags |= USING_MSI_FLAG;
4994 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4995 bp->flags |= ONE_SHOT_MSI_FLAG;
4998 rc = bnx2_request_irq(bp);
5005 rc = bnx2_init_nic(bp);
5014 mod_timer(&bp->timer, jiffies + bp->current_interval);
5016 atomic_set(&bp->intr_sem, 0);
5018 bnx2_enable_int(bp);
5020 if (bp->flags & USING_MSI_FLAG) {
5021 /* Test MSI to make sure it is working
5022 * If MSI test fails, go back to INTx mode
5024 if (bnx2_test_intr(bp) != 0) {
5025 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5026 " using MSI, switching to INTx mode. Please"
5027 " report this failure to the PCI maintainer"
5028 " and include system chipset information.\n",
5031 bnx2_disable_int(bp);
5034 rc = bnx2_init_nic(bp);
5037 rc = bnx2_request_irq(bp);
5042 del_timer_sync(&bp->timer);
5045 bnx2_enable_int(bp);
5048 if (bp->flags & USING_MSI_FLAG) {
5049 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5052 netif_start_queue(dev);
5058 bnx2_reset_task(struct work_struct *work)
5060 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5062 if (!netif_running(bp->dev))
5065 bp->in_reset_task = 1;
5066 bnx2_netif_stop(bp);
5070 atomic_set(&bp->intr_sem, 1);
5071 bnx2_netif_start(bp);
5072 bp->in_reset_task = 0;
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
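
/* A transmit packet uses one buffer descriptor for the linear data plus one
 * per page fragment; TX_BD_FLAGS_START and TX_BD_FLAGS_END mark the first
 * and last BD of the chain.  bnx2_tx_avail() is checked up front so the
 * ring can never overflow mid-packet. */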
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
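
/* The hardware keeps 64-bit counters as _hi/_lo register pairs.  On 64-bit
 * hosts both halves are combined; on 32-bit hosts only the low word is
 * reported, since struct net_device_stats uses unsigned long. */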
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	} else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		} else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
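
/* reg_boundaries[] holds consecutive [start, end) pairs of register ranges
 * that are safe to read; the dump leaves the unreadable holes between the
 * ranges zero-filled. */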
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset / 4;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
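
/* Clamp the user-supplied coalescing values to what the host coalescing
 * block can hold; the masks suggest 10-bit tick fields and 8-bit
 * frame-count fields.  The 5708 only supports statistics updates of
 * exactly one second (see the errata workaround in bnx2_timer()). */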
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	} else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
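
/* The 5708 keeps stat_Dot3StatsCarrierSenseErrors; only
 * stat_IfHCInBadOctets is still skipped (length 0). */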
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		} else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
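
/* Copy the hardware statistics block out as u64 values, widening 4-byte
 * counters and folding _hi/_lo pairs for 8-byte ones.  Early 5706/5708
 * steppings use the reduced length table because of counter errata. */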
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
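
/* ethtool -p support: blink the port LEDs for identification.  data is the
 * number of seconds to blink (0 defaulting to 2); the loop alternates
 * all-off and all-on override states every 500 ms. */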
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		} else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_hw_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
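
/* The 5709 is a dual-media part: the bond id or, failing that, the PHY
 * strap value (possibly overridden) selects copper vs. SerDes per PCI
 * function. */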
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
}
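
/* One-time probe setup: PCI/PCI-X/PCIe discovery, BAR mapping, DMA mask
 * selection, shared-memory/firmware handshake, MAC address retrieval and
 * default link and coalescing parameters. */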
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= PCIE_FLAG;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & PCIE_FLAG))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	} else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->phy_port = PORT_FIBRE;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & PCIE_FLAG) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & PCIX_FLAG)
			s += sprintf(s, "-X");
		if (bp->flags & PCI_32BIT_FLAG)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);