1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.4.45"
60 #define DRV_MODULE_RELDATE "September 29, 2006"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 /* indexed by board_t, above */
91 static struct {
92 char *name;
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
121 static struct flash_spec flash_table[] =
124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
125 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
130 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
131 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144 "Non-buffered flash (256kB)"},
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
152 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
153 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
154 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
159 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165 "Non-buffered flash (64kB)"},
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 /* Atmel Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
205 "Buffered flash (256kB)"},
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
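/* Number of free TX descriptors, derived from the ring-page-masked
 * producer and consumer indices.  When the subtraction wraps, the
 * result is masked back into range, with one slot discounted for the
 * chain BD that links ring pages.
 */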
210 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
216 if (diff > MAX_TX_DESC_CNT)
217 diff = (diff & MAX_TX_DESC_CNT) - 1;
218 return (bp->tx_ring_size - diff);
222 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
224 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
229 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
231 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
239 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
240 REG_WR(bp, BNX2_CTX_DATA, val);
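/* Clause-22 MDIO access.  The command word packs the PHY address into
 * bits 21-25 and the register number into bits 16-20, then sets
 * START_BUSY; completion is detected by polling for START_BUSY to
 * clear.  Hardware auto-polling of the PHY is suspended around the
 * manual access and re-enabled afterwards so the two do not collide.
 */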
244 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
249 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
250 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
253 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
254 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
259 val1 = (bp->phy_addr << 21) | (reg << 16) |
260 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
261 BNX2_EMAC_MDIO_COMM_START_BUSY;
262 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
264 for (i = 0; i < 50; i++) {
267 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
271 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
272 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
278 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
287 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
288 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
291 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
292 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
306 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
310 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
311 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
316 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
317 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
318 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
319 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
321 for (i = 0; i < 50; i++) {
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
325 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
331 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
336 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
341 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
350 bnx2_disable_int(struct bnx2 *bp)
352 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
353 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
354 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
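/* Interrupt enable is a two-step ack: the first INT_ACK_CMD write
 * updates the status index with the mask bit still set, the second
 * clears the mask.  The trailing COAL_NOW command kicks the host
 * coalescing block so an interrupt is generated for any events that
 * arrived while masked.
 */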
358 bnx2_enable_int(struct bnx2 *bp)
360 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
361 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
362 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
367 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
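/* intr_sem acts as an interrupt-disable count: bnx2_disable_int_sync()
 * raises it and waits out any handler already running, the ISRs return
 * early while it is non-zero, and bnx2_netif_start() re-enables the
 * queue and polling only when atomic_dec_and_test() brings it back to
 * zero.
 */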
371 bnx2_disable_int_sync(struct bnx2 *bp)
373 atomic_inc(&bp->intr_sem);
374 bnx2_disable_int(bp);
375 synchronize_irq(bp->pdev->irq);
379 bnx2_netif_stop(struct bnx2 *bp)
381 bnx2_disable_int_sync(bp);
382 if (netif_running(bp->dev)) {
383 netif_poll_disable(bp->dev);
384 netif_tx_disable(bp->dev);
385 bp->dev->trans_start = jiffies; /* prevent tx timeout */
390 bnx2_netif_start(struct bnx2 *bp)
392 if (atomic_dec_and_test(&bp->intr_sem)) {
393 if (netif_running(bp->dev)) {
394 netif_wake_queue(bp->dev);
395 netif_poll_enable(bp->dev);
402 bnx2_free_mem(struct bnx2 *bp)
406 if (bp->status_blk) {
407 pci_free_consistent(bp->pdev, bp->status_stats_size,
408 bp->status_blk, bp->status_blk_mapping);
409 bp->status_blk = NULL;
410 bp->stats_blk = NULL;
412 if (bp->tx_desc_ring) {
413 pci_free_consistent(bp->pdev,
414 sizeof(struct tx_bd) * TX_DESC_CNT,
415 bp->tx_desc_ring, bp->tx_desc_mapping);
416 bp->tx_desc_ring = NULL;
418 kfree(bp->tx_buf_ring);
419 bp->tx_buf_ring = NULL;
420 for (i = 0; i < bp->rx_max_ring; i++) {
421 if (bp->rx_desc_ring[i])
422 pci_free_consistent(bp->pdev,
423 sizeof(struct rx_bd) * RX_DESC_CNT,
425 bp->rx_desc_mapping[i]);
426 bp->rx_desc_ring[i] = NULL;
428 vfree(bp->rx_buf_ring);
429 bp->rx_buf_ring = NULL;
433 bnx2_alloc_mem(struct bnx2 *bp)
435 int i, status_blk_size;
437 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
439 if (bp->tx_buf_ring == NULL)
442 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
443 sizeof(struct tx_bd) *
445 &bp->tx_desc_mapping);
446 if (bp->tx_desc_ring == NULL)
449 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
451 if (bp->rx_buf_ring == NULL)
454 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
457 for (i = 0; i < bp->rx_max_ring; i++) {
458 bp->rx_desc_ring[i] =
459 pci_alloc_consistent(bp->pdev,
460 sizeof(struct rx_bd) * RX_DESC_CNT,
461 &bp->rx_desc_mapping[i]);
462 if (bp->rx_desc_ring[i] == NULL)
467 /* Combine status and statistics blocks into one allocation. */
468 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
469 bp->status_stats_size = status_blk_size +
470 sizeof(struct statistics_block);
472 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
473 &bp->status_blk_mapping);
474 if (bp->status_blk == NULL)
477 memset(bp->status_blk, 0, bp->status_stats_size);
479 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
482 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
492 bnx2_report_fw_link(struct bnx2 *bp)
494 u32 fw_link_status = 0;
499 switch (bp->line_speed) {
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
548 bnx2_report_link(struct bnx2 *bp)
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
554 printk("%d Mbps ", bp->line_speed);
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
559 printk("half duplex");
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
568 printk(", transmit ");
570 printk("flow control ON");
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
579 bnx2_report_fw_link(bp);
583 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
585 u32 local_adv, remote_adv;
588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
597 if (bp->duplex != DUPLEX_FULL) {
601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
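/* Resolution outcome, in brief: symmetric PAUSE on both sides enables
 * flow control both ways; PAUSE+ASYM locally against ASYM-only remotely
 * yields RX-only; ASYM-only locally against PAUSE+ASYM remotely yields
 * TX-only.
 */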
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if(local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
653 bp->flow_ctrl = FLOW_CTRL_TX;
659 bnx2_5708s_linkup(struct bnx2 *bp)
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
682 bp->duplex = DUPLEX_HALF;
688 bnx2_5706s_linkup(struct bnx2 *bp)
690 u32 bmcr, local_adv, remote_adv, common;
693 bp->line_speed = SPEED_1000;
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
700 bp->duplex = DUPLEX_HALF;
703 if (!(bmcr & BMCR_ANENABLE)) {
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
717 bp->duplex = DUPLEX_HALF;
725 bnx2_copper_linkup(struct bnx2 *bp)
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
777 bp->line_speed = SPEED_10;
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
783 bp->duplex = DUPLEX_HALF;
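/* Program the EMAC to match the resolved link: the transmit-lengths
 * register gets a special value for 1000 Mbps half duplex, the port
 * mode bits (MII vs GMII, plus the 2.5G bit) track the line speed, and
 * the RX/TX PAUSE enables track the negotiated flow control.
 */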
791 bnx2_set_mac_link(struct bnx2 *bp)
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797 (bp->duplex == DUPLEX_HALF)) {
798 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
801 /* Configure the EMAC mode register. */
802 val = REG_RD(bp, BNX2_EMAC_MODE);
804 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
805 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
809 switch (bp->line_speed) {
811 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812 val |= BNX2_EMAC_MODE_PORT_MII_10;
817 val |= BNX2_EMAC_MODE_PORT_MII;
820 val |= BNX2_EMAC_MODE_25G;
823 val |= BNX2_EMAC_MODE_PORT_GMII;
828 val |= BNX2_EMAC_MODE_PORT_GMII;
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp->duplex == DUPLEX_HALF)
833 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834 REG_WR(bp, BNX2_EMAC_MODE, val);
836 /* Enable/disable rx PAUSE. */
837 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
839 if (bp->flow_ctrl & FLOW_CTRL_RX)
840 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
843 /* Enable/disable tx PAUSE. */
844 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
847 if (bp->flow_ctrl & FLOW_CTRL_TX)
848 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
851 /* Acknowledge the interrupt. */
852 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
858 bnx2_set_link(struct bnx2 *bp)
863 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
868 link_up = bp->link_up;
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
871 bnx2_read_phy(bp, MII_BMSR, &bmsr);
873 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
877 val = REG_RD(bp, BNX2_EMAC_STATUS);
878 if (val & BNX2_EMAC_STATUS_LINK)
879 bmsr |= BMSR_LSTATUS;
881 bmsr &= ~BMSR_LSTATUS;
884 if (bmsr & BMSR_LSTATUS) {
887 if (bp->phy_flags & PHY_SERDES_FLAG) {
888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
889 bnx2_5706s_linkup(bp);
890 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891 bnx2_5708s_linkup(bp);
894 bnx2_copper_linkup(bp);
896 bnx2_resolve_flow_ctrl(bp);
899 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900 (bp->autoneg & AUTONEG_SPEED)) {
904 bnx2_read_phy(bp, MII_BMCR, &bmcr);
905 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
906 if (!(bmcr & BMCR_ANENABLE)) {
907 bnx2_write_phy(bp, MII_BMCR, bmcr |
911 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
915 if (bp->link_up != link_up) {
916 bnx2_report_link(bp);
919 bnx2_set_mac_link(bp);
925 bnx2_reset_phy(struct bnx2 *bp)
930 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
932 #define PHY_RESET_MAX_WAIT 100
933 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
936 bnx2_read_phy(bp, MII_BMCR, &reg);
937 if (!(reg & BMCR_RESET)) {
942 if (i == PHY_RESET_MAX_WAIT) {
949 bnx2_phy_get_pause_adv(struct bnx2 *bp)
953 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
956 if (bp->phy_flags & PHY_SERDES_FLAG) {
957 adv = ADVERTISE_1000XPAUSE;
960 adv = ADVERTISE_PAUSE_CAP;
963 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964 if (bp->phy_flags & PHY_SERDES_FLAG) {
965 adv = ADVERTISE_1000XPSE_ASYM;
968 adv = ADVERTISE_PAUSE_ASYM;
971 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972 if (bp->phy_flags & PHY_SERDES_FLAG) {
973 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
976 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
983 bnx2_setup_serdes_phy(struct bnx2 *bp)
988 if (!(bp->autoneg & AUTONEG_SPEED)) {
990 int force_link_down = 0;
992 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
993 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
995 bnx2_read_phy(bp, MII_BMCR, &bmcr);
996 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
997 new_bmcr |= BMCR_SPEED1000;
998 if (bp->req_line_speed == SPEED_2500) {
999 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1000 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1001 if (!(up1 & BCM5708S_UP1_2G5)) {
1002 up1 |= BCM5708S_UP1_2G5;
1003 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1004 force_link_down = 1;
1006 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1007 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1008 if (up1 & BCM5708S_UP1_2G5) {
1009 up1 &= ~BCM5708S_UP1_2G5;
1010 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1011 force_link_down = 1;
1015 if (bp->req_duplex == DUPLEX_FULL) {
1016 adv |= ADVERTISE_1000XFULL;
1017 new_bmcr |= BMCR_FULLDPLX;
1020 adv |= ADVERTISE_1000XHALF;
1021 new_bmcr &= ~BMCR_FULLDPLX;
1023 if ((new_bmcr != bmcr) || (force_link_down)) {
1024 /* Force a link down visible on the other side */
1026 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1027 ~(ADVERTISE_1000XFULL |
1028 ADVERTISE_1000XHALF));
1029 bnx2_write_phy(bp, MII_BMCR, bmcr |
1030 BMCR_ANRESTART | BMCR_ANENABLE);
1033 netif_carrier_off(bp->dev);
1034 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1035 bnx2_report_link(bp);
1037 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1038 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1043 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1044 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1045 up1 |= BCM5708S_UP1_2G5;
1046 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1049 if (bp->advertising & ADVERTISED_1000baseT_Full)
1050 new_adv |= ADVERTISE_1000XFULL;
1052 new_adv |= bnx2_phy_get_pause_adv(bp);
1054 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1055 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1057 bp->serdes_an_pending = 0;
1058 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1059 /* Force a link down visible on the other side */
1061 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1062 spin_unlock_bh(&bp->phy_lock);
1064 spin_lock_bh(&bp->phy_lock);
1067 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1068 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1070 /* Speed up link-up time when the link partner
1071 * does not autonegotiate, which is very common
1072 * in blade servers. Some blade servers use
1073 * IPMI for keyboard input and it's important
1074 * to minimize link disruptions. Autoneg. involves
1075 * exchanging base pages plus 3 next pages and
1076 * normally completes in about 120 msec.
1078 bp->current_interval = SERDES_AN_TIMEOUT;
1079 bp->serdes_an_pending = 1;
1080 mod_timer(&bp->timer, jiffies + bp->current_interval);
1086 #define ETHTOOL_ALL_FIBRE_SPEED \
1087 (ADVERTISED_1000baseT_Full)
1089 #define ETHTOOL_ALL_COPPER_SPEED \
1090 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1091 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1092 ADVERTISED_1000baseT_Full)
1094 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1095 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1097 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1100 bnx2_setup_copper_phy(struct bnx2 *bp)
1105 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1107 if (bp->autoneg & AUTONEG_SPEED) {
1108 u32 adv_reg, adv1000_reg;
1109 u32 new_adv_reg = 0;
1110 u32 new_adv1000_reg = 0;
1112 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1113 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1114 ADVERTISE_PAUSE_ASYM);
1116 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1117 adv1000_reg &= PHY_ALL_1000_SPEED;
1119 if (bp->advertising & ADVERTISED_10baseT_Half)
1120 new_adv_reg |= ADVERTISE_10HALF;
1121 if (bp->advertising & ADVERTISED_10baseT_Full)
1122 new_adv_reg |= ADVERTISE_10FULL;
1123 if (bp->advertising & ADVERTISED_100baseT_Half)
1124 new_adv_reg |= ADVERTISE_100HALF;
1125 if (bp->advertising & ADVERTISED_100baseT_Full)
1126 new_adv_reg |= ADVERTISE_100FULL;
1127 if (bp->advertising & ADVERTISED_1000baseT_Full)
1128 new_adv1000_reg |= ADVERTISE_1000FULL;
1130 new_adv_reg |= ADVERTISE_CSMA;
1132 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1134 if ((adv1000_reg != new_adv1000_reg) ||
1135 (adv_reg != new_adv_reg) ||
1136 ((bmcr & BMCR_ANENABLE) == 0)) {
1138 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1139 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1140 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1143 else if (bp->link_up) {
1144 /* Flow ctrl may have changed from auto to forced */
1145 /* or vice-versa. */
1147 bnx2_resolve_flow_ctrl(bp);
1148 bnx2_set_mac_link(bp);
1154 if (bp->req_line_speed == SPEED_100) {
1155 new_bmcr |= BMCR_SPEED100;
1157 if (bp->req_duplex == DUPLEX_FULL) {
1158 new_bmcr |= BMCR_FULLDPLX;
1160 if (new_bmcr != bmcr) {
1163 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1164 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1166 if (bmsr & BMSR_LSTATUS) {
1167 /* Force link down */
1168 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1169 spin_unlock_bh(&bp->phy_lock);
1171 spin_lock_bh(&bp->phy_lock);
1173 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1174 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1177 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1179 /* Normally, the new speed is setup after the link has
1180 * gone down and up again. In some cases, link will not go
1181 * down so we need to set up the new speed here.
1183 if (bmsr & BMSR_LSTATUS) {
1184 bp->line_speed = bp->req_line_speed;
1185 bp->duplex = bp->req_duplex;
1186 bnx2_resolve_flow_ctrl(bp);
1187 bnx2_set_mac_link(bp);
1194 bnx2_setup_phy(struct bnx2 *bp)
1196 if (bp->loopback == MAC_LOOPBACK)
1199 if (bp->phy_flags & PHY_SERDES_FLAG) {
1200 return (bnx2_setup_serdes_phy(bp));
1203 return (bnx2_setup_copper_phy(bp));
1208 bnx2_init_5708s_phy(struct bnx2 *bp)
1212 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1213 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1214 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1216 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1217 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1218 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1220 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1221 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1222 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1224 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1225 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1226 val |= BCM5708S_UP1_2G5;
1227 bnx2_write_phy(bp, BCM5708S_UP1, val);
1230 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1231 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1232 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1233 /* increase tx signal amplitude */
1234 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1235 BCM5708S_BLK_ADDR_TX_MISC);
1236 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1237 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1238 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1239 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1242 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1243 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1248 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1249 BNX2_SHARED_HW_CFG_CONFIG);
1250 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1251 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1252 BCM5708S_BLK_ADDR_TX_MISC);
1253 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1254 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1255 BCM5708S_BLK_ADDR_DIG);
1262 bnx2_init_5706s_phy(struct bnx2 *bp)
1264 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1266 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1267 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1270 if (bp->dev->mtu > 1500) {
1273 /* Set extended packet length bit */
1274 bnx2_write_phy(bp, 0x18, 0x7);
1275 bnx2_read_phy(bp, 0x18, &val);
1276 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1278 bnx2_write_phy(bp, 0x1c, 0x6c00);
1279 bnx2_read_phy(bp, 0x1c, &val);
1280 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1285 bnx2_write_phy(bp, 0x18, 0x7);
1286 bnx2_read_phy(bp, 0x18, &val);
1287 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1289 bnx2_write_phy(bp, 0x1c, 0x6c00);
1290 bnx2_read_phy(bp, 0x1c, &val);
1291 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1298 bnx2_init_copper_phy(struct bnx2 *bp)
1302 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1304 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1305 bnx2_write_phy(bp, 0x18, 0x0c00);
1306 bnx2_write_phy(bp, 0x17, 0x000a);
1307 bnx2_write_phy(bp, 0x15, 0x310b);
1308 bnx2_write_phy(bp, 0x17, 0x201f);
1309 bnx2_write_phy(bp, 0x15, 0x9506);
1310 bnx2_write_phy(bp, 0x17, 0x401f);
1311 bnx2_write_phy(bp, 0x15, 0x14e2);
1312 bnx2_write_phy(bp, 0x18, 0x0400);
1315 if (bp->dev->mtu > 1500) {
1316 /* Set extended packet length bit */
1317 bnx2_write_phy(bp, 0x18, 0x7);
1318 bnx2_read_phy(bp, 0x18, &val);
1319 bnx2_write_phy(bp, 0x18, val | 0x4000);
1321 bnx2_read_phy(bp, 0x10, &val);
1322 bnx2_write_phy(bp, 0x10, val | 0x1);
1325 bnx2_write_phy(bp, 0x18, 0x7);
1326 bnx2_read_phy(bp, 0x18, &val);
1327 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1329 bnx2_read_phy(bp, 0x10, &val);
1330 bnx2_write_phy(bp, 0x10, val & ~0x1);
1333 /* ethernet@wirespeed */
1334 bnx2_write_phy(bp, 0x18, 0x7007);
1335 bnx2_read_phy(bp, 0x18, &val);
1336 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1342 bnx2_init_phy(struct bnx2 *bp)
1347 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1348 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1350 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1354 bnx2_read_phy(bp, MII_PHYSID1, &val);
1355 bp->phy_id = val << 16;
1356 bnx2_read_phy(bp, MII_PHYSID2, &val);
1357 bp->phy_id |= val & 0xffff;
1359 if (bp->phy_flags & PHY_SERDES_FLAG) {
1360 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1361 rc = bnx2_init_5706s_phy(bp);
1362 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1363 rc = bnx2_init_5708s_phy(bp);
1366 rc = bnx2_init_copper_phy(bp);
1375 bnx2_set_mac_loopback(struct bnx2 *bp)
1379 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1380 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1381 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1382 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1387 static int bnx2_test_link(struct bnx2 *);
1390 bnx2_set_phy_loopback(struct bnx2 *bp)
1395 spin_lock_bh(&bp->phy_lock);
1396 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1398 spin_unlock_bh(&bp->phy_lock);
1402 for (i = 0; i < 10; i++) {
1403 if (bnx2_test_link(bp) == 0)
1408 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1409 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1410 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1411 BNX2_EMAC_MODE_25G);
1413 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1414 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
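/* Driver/firmware mailbox handshake: a sequence number (fw_wr_seq) is
 * merged into the message written to the DRV_MB shared-memory word, and
 * the firmware echoes it in FW_MB once it has consumed the message.
 * The loop below polls for that echo and, on timeout, posts a
 * FW_TIMEOUT code so the firmware knows the driver gave up waiting.
 */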
1420 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1426 msg_data |= bp->fw_wr_seq;
1428 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1430 /* wait for an acknowledgement. */
1431 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1434 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1436 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1439 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1442 /* If we timed out, inform the firmware that this is the case. */
1443 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1445 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1448 msg_data &= ~BNX2_DRV_MSG_CODE;
1449 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1451 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1456 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1463 bnx2_init_context(struct bnx2 *bp)
1469 u32 vcid_addr, pcid_addr, offset;
1473 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1476 vcid_addr = GET_PCID_ADDR(vcid);
1478 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1483 pcid_addr = GET_PCID_ADDR(new_vcid);
1486 vcid_addr = GET_CID_ADDR(vcid);
1487 pcid_addr = vcid_addr;
1490 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1491 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1493 /* Zero out the context. */
1494 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1495 CTX_WR(bp, 0x00, offset, 0);
1498 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1499 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1504 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1510 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1511 if (good_mbuf == NULL) {
1512 printk(KERN_ERR PFX "Failed to allocate memory in "
1513 "bnx2_alloc_bad_rbuf\n");
1517 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1518 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1522 /* Allocate a bunch of mbufs and save the good ones in an array. */
1523 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1524 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1525 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1527 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1529 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1531 /* The addresses with Bit 9 set are bad memory blocks. */
1532 if (!(val & (1 << 9))) {
1533 good_mbuf[good_mbuf_cnt] = (u16) val;
1537 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1540 /* Free the good ones back to the mbuf pool, thus discarding
1541 * all the bad ones. */
1542 while (good_mbuf_cnt) {
1545 val = good_mbuf[good_mbuf_cnt];
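/* The free command below appears to encode the cluster number twice,
 * shifted and unshifted, with bit 0 serving as a valid bit. */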
1546 val = (val << 9) | val | 1;
1548 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
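/* The 6-byte MAC address is split across two match registers: the two
 * high-order bytes go in MAC_MATCH0 and the remaining four in
 * MAC_MATCH1, so 00:10:18:aa:bb:cc is written as 0x00000010 and
 * 0x18aabbcc.
 */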
1555 bnx2_set_mac_addr(struct bnx2 *bp)
1558 u8 *mac_addr = bp->dev->dev_addr;
1560 val = (mac_addr[0] << 8) | mac_addr[1];
1562 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1564 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1565 (mac_addr[4] << 8) | mac_addr[5];
1567 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
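/* Allocate and map a receive skb for ring slot 'index'.  The buffer
 * appears to require 8-byte alignment, hence the skb_reserve() fix-up
 * of skb->data before the DMA mapping is made and the 64-bit bus
 * address is split into the hi/lo halves of the rx_bd.
 */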
1571 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1573 struct sk_buff *skb;
1574 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1576 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1577 unsigned long align;
1579 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1584 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1585 skb_reserve(skb, 8 - align);
1588 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1589 PCI_DMA_FROMDEVICE);
1592 pci_unmap_addr_set(rx_buf, mapping, mapping);
1594 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1595 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1597 bp->rx_prod_bseq += bp->rx_buf_use_size;
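/* Link-change detection: the status block carries a link attention bit
 * and a matching ack bit; a mismatch means the link state toggled.
 * The ack bit is brought back in sync through the STATUS_BIT_SET/CLEAR
 * command registers and the new state is then handled.
 */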
1603 bnx2_phy_int(struct bnx2 *bp)
1605 u32 new_link_state, old_link_state;
1607 new_link_state = bp->status_blk->status_attn_bits &
1608 STATUS_ATTN_BITS_LINK_STATE;
1609 old_link_state = bp->status_blk->status_attn_bits_ack &
1610 STATUS_ATTN_BITS_LINK_STATE;
1611 if (new_link_state != old_link_state) {
1612 if (new_link_state) {
1613 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1614 STATUS_ATTN_BITS_LINK_STATE);
1617 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1618 STATUS_ATTN_BITS_LINK_STATE);
1625 bnx2_tx_int(struct bnx2 *bp)
1627 struct status_block *sblk = bp->status_blk;
1628 u16 hw_cons, sw_cons, sw_ring_cons;
1631 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1632 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1635 sw_cons = bp->tx_cons;
1637 while (sw_cons != hw_cons) {
1638 struct sw_bd *tx_buf;
1639 struct sk_buff *skb;
1642 sw_ring_cons = TX_RING_IDX(sw_cons);
1644 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1647 /* partial BD completions possible with TSO packets */
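/* If the last BD of this skb is still beyond hw_cons, the hardware
 * has not completed the whole packet yet; stop and resume on a later
 * completion.
 */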
1648 if (skb_is_gso(skb)) {
1649 u16 last_idx, last_ring_idx;
1651 last_idx = sw_cons +
1652 skb_shinfo(skb)->nr_frags + 1;
1653 last_ring_idx = sw_ring_cons +
1654 skb_shinfo(skb)->nr_frags + 1;
1655 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1658 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1663 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1664 skb_headlen(skb), PCI_DMA_TODEVICE);
1667 last = skb_shinfo(skb)->nr_frags;
1669 for (i = 0; i < last; i++) {
1670 sw_cons = NEXT_TX_BD(sw_cons);
1672 pci_unmap_page(bp->pdev,
1674 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1676 skb_shinfo(skb)->frags[i].size,
1680 sw_cons = NEXT_TX_BD(sw_cons);
1682 tx_free_bd += last + 1;
1686 hw_cons = bp->hw_tx_cons =
1687 sblk->status_tx_quick_consumer_index0;
1689 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1694 bp->tx_cons = sw_cons;
1695 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1696 * before checking for netif_queue_stopped(). Without the
1697 * memory barrier, there is a small possibility that bnx2_start_xmit()
1698 * will miss it and cause the queue to be stopped forever.
1699 */
1700 smp_mb();
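/* The queue is only woken under netif_tx_lock, with the stopped/
 * available test repeated inside the lock; this closes the race with
 * bnx2_start_xmit() stopping the queue between the two checks.
 */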
1702 if (unlikely(netif_queue_stopped(bp->dev)) &&
1703 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1704 netif_tx_lock(bp->dev);
1705 if ((netif_queue_stopped(bp->dev)) &&
1706 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1707 netif_wake_queue(bp->dev);
1708 netif_tx_unlock(bp->dev);
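/* Recycle an rx buffer in place: when the packet was copied out or a
 * replacement skb could not be allocated, the existing skb and its DMA
 * mapping are moved from the consumer slot to the producer slot so the
 * ring never runs short of buffers.
 */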
1713 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1716 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1717 struct rx_bd *cons_bd, *prod_bd;
1719 cons_rx_buf = &bp->rx_buf_ring[cons];
1720 prod_rx_buf = &bp->rx_buf_ring[prod];
1722 pci_dma_sync_single_for_device(bp->pdev,
1723 pci_unmap_addr(cons_rx_buf, mapping),
1724 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1726 bp->rx_prod_bseq += bp->rx_buf_use_size;
1728 prod_rx_buf->skb = skb;
1733 pci_unmap_addr_set(prod_rx_buf, mapping,
1734 pci_unmap_addr(cons_rx_buf, mapping));
1736 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1737 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1738 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1739 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1743 bnx2_rx_int(struct bnx2 *bp, int budget)
1745 struct status_block *sblk = bp->status_blk;
1746 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1747 struct l2_fhdr *rx_hdr;
1750 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1751 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1754 sw_cons = bp->rx_cons;
1755 sw_prod = bp->rx_prod;
1757 /* Memory barrier necessary as speculative reads of the rx
1758 * buffer can be ahead of the index in the status block
1761 while (sw_cons != hw_cons) {
1764 struct sw_bd *rx_buf;
1765 struct sk_buff *skb;
1766 dma_addr_t dma_addr;
1768 sw_ring_cons = RX_RING_IDX(sw_cons);
1769 sw_ring_prod = RX_RING_IDX(sw_prod);
1771 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1776 dma_addr = pci_unmap_addr(rx_buf, mapping);
1778 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1779 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1781 rx_hdr = (struct l2_fhdr *) skb->data;
1782 len = rx_hdr->l2_fhdr_pkt_len - 4;
1784 if ((status = rx_hdr->l2_fhdr_status) &
1785 (L2_FHDR_ERRORS_BAD_CRC |
1786 L2_FHDR_ERRORS_PHY_DECODE |
1787 L2_FHDR_ERRORS_ALIGNMENT |
1788 L2_FHDR_ERRORS_TOO_SHORT |
1789 L2_FHDR_ERRORS_GIANT_FRAME)) {
1794 /* Since we don't have a jumbo ring, copy small packets
1795 * if mtu > 1500
1796 */
1797 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1798 struct sk_buff *new_skb;
1800 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1801 if (new_skb == NULL)
1805 memcpy(new_skb->data,
1806 skb->data + bp->rx_offset - 2,
1809 skb_reserve(new_skb, 2);
1810 skb_put(new_skb, len);
1812 bnx2_reuse_rx_skb(bp, skb,
1813 sw_ring_cons, sw_ring_prod);
1817 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1818 pci_unmap_single(bp->pdev, dma_addr,
1819 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1821 skb_reserve(skb, bp->rx_offset);
1826 bnx2_reuse_rx_skb(bp, skb,
1827 sw_ring_cons, sw_ring_prod);
1831 skb->protocol = eth_type_trans(skb, bp->dev);
1833 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1834 (ntohs(skb->protocol) != 0x8100)) {
1841 skb->ip_summed = CHECKSUM_NONE;
1843 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1844 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1846 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1847 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1848 skb->ip_summed = CHECKSUM_UNNECESSARY;
1852 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1853 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1854 rx_hdr->l2_fhdr_vlan_tag);
1858 netif_receive_skb(skb);
1860 bp->dev->last_rx = jiffies;
1864 sw_cons = NEXT_RX_BD(sw_cons);
1865 sw_prod = NEXT_RX_BD(sw_prod);
1867 if (rx_pkt == budget)
1870 /* Refresh hw_cons to see if there is new work */
1871 if (sw_cons == hw_cons) {
1872 hw_cons = bp->hw_rx_cons =
1873 sblk->status_rx_quick_consumer_index0;
1874 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1879 bp->rx_cons = sw_cons;
1880 bp->rx_prod = sw_prod;
1882 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1884 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1892 /* MSI ISR - The only difference between this and the INTx ISR
1893 * is that the MSI interrupt is always serviced.
1896 bnx2_msi(int irq, void *dev_instance)
1898 struct net_device *dev = dev_instance;
1899 struct bnx2 *bp = netdev_priv(dev);
1901 prefetch(bp->status_blk);
1902 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1903 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1904 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1906 /* Return here if interrupt is disabled. */
1907 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1910 netif_rx_schedule(dev);
1916 bnx2_interrupt(int irq, void *dev_instance)
1918 struct net_device *dev = dev_instance;
1919 struct bnx2 *bp = netdev_priv(dev);
1921 /* When using INTx, it is possible for the interrupt to arrive
1922 * at the CPU before the status block posted prior to the
1923 * interrupt. Reading a register will flush the status block.
1924 * When using MSI, the MSI message will always complete after
1925 * the status block write.
1927 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1928 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1929 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1932 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1933 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1934 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1936 /* Return here if interrupt is shared and is disabled. */
1937 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1940 netif_rx_schedule(dev);
1946 bnx2_has_work(struct bnx2 *bp)
1948 struct status_block *sblk = bp->status_blk;
1950 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1951 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1954 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
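/* NAPI poll using the pre-2.6.24 interface: *budget and dev->quota are
 * both decremented by the RX work actually done, link attentions are
 * handled under phy_lock, and once bnx2_has_work() reports nothing
 * pending the device is removed from the poll list and interrupts are
 * re-enabled by acking with the latest status index.
 */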
1962 bnx2_poll(struct net_device *dev, int *budget)
1964 struct bnx2 *bp = netdev_priv(dev);
1966 if ((bp->status_blk->status_attn_bits &
1967 STATUS_ATTN_BITS_LINK_STATE) !=
1968 (bp->status_blk->status_attn_bits_ack &
1969 STATUS_ATTN_BITS_LINK_STATE)) {
1971 spin_lock(&bp->phy_lock);
1973 spin_unlock(&bp->phy_lock);
1975 /* This is needed to take care of transient status
1976 * during link changes.
1978 REG_WR(bp, BNX2_HC_COMMAND,
1979 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1980 REG_RD(bp, BNX2_HC_COMMAND);
1983 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1986 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1987 int orig_budget = *budget;
1990 if (orig_budget > dev->quota)
1991 orig_budget = dev->quota;
1993 work_done = bnx2_rx_int(bp, orig_budget);
1994 *budget -= work_done;
1995 dev->quota -= work_done;
1998 bp->last_status_idx = bp->status_blk->status_idx;
2001 if (!bnx2_has_work(bp)) {
2002 netif_rx_complete(dev);
2003 if (likely(bp->flags & USING_MSI_FLAG)) {
2004 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2005 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2006 bp->last_status_idx);
2009 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2010 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2011 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2012 bp->last_status_idx);
2014 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2015 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2016 bp->last_status_idx);
2023 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2024 * from set_multicast.
2027 bnx2_set_rx_mode(struct net_device *dev)
2029 struct bnx2 *bp = netdev_priv(dev);
2030 u32 rx_mode, sort_mode;
2033 spin_lock_bh(&bp->phy_lock);
2035 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2036 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2037 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2038 #ifdef BCM_VLAN
2039 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2040 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2041 #else
2042 if (!(bp->flags & ASF_ENABLE_FLAG))
2043 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2044 #endif
2045 if (dev->flags & IFF_PROMISC) {
2046 /* Promiscuous mode. */
2047 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2048 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2049 BNX2_RPM_SORT_USER0_PROM_VLAN;
2051 else if (dev->flags & IFF_ALLMULTI) {
2052 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2053 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2056 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2059 /* Accept one or more multicast(s). */
2060 struct dev_mc_list *mclist;
2061 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2066 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
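/* Each multicast address is hashed into one of NUM_MC_HASH_REGISTERS
 * 32-bit registers: the low byte of the CRC picks the filter bit, its
 * top three bits selecting the register and its low five bits the bit
 * position within it.
 */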
2068 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2069 i++, mclist = mclist->next) {
2071 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2072 bit = crc & 0xff;
2073 regidx = (bit & 0xe0) >> 5;
2074 bit &= 0x1f;
2075 mc_filter[regidx] |= (1 << bit);
2078 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2079 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2083 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2086 if (rx_mode != bp->rx_mode) {
2087 bp->rx_mode = rx_mode;
2088 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2091 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2092 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2093 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2095 spin_unlock_bh(&bp->phy_lock);
2098 #define FW_BUF_SIZE 0x8000
2101 bnx2_gunzip_init(struct bnx2 *bp)
2103 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2106 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2109 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2110 if (bp->strm->workspace == NULL)
2120 vfree(bp->gunzip_buf);
2121 bp->gunzip_buf = NULL;
2124 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2125 "uncompression.\n", bp->dev->name);
2130 bnx2_gunzip_end(struct bnx2 *bp)
2132 kfree(bp->strm->workspace);
2137 if (bp->gunzip_buf) {
2138 vfree(bp->gunzip_buf);
2139 bp->gunzip_buf = NULL;
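/* Inflate a gzipped firmware image into bp->gunzip_buf.  The fixed
 * 10-byte gzip header is skipped first, along with the NUL-terminated
 * file-name field when the FNAME flag is set, and zlib is initialized
 * with -MAX_WBITS since what remains is a raw deflate stream.
 */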
2144 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2148 /* check gzip header */
2149 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2155 if (zbuf[3] & FNAME)
2156 while ((zbuf[n++] != 0) && (n < len));
2158 bp->strm->next_in = zbuf + n;
2159 bp->strm->avail_in = len - n;
2160 bp->strm->next_out = bp->gunzip_buf;
2161 bp->strm->avail_out = FW_BUF_SIZE;
2163 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2167 rc = zlib_inflate(bp->strm, Z_FINISH);
2169 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2170 *outbuf = bp->gunzip_buf;
2172 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2173 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2174 bp->dev->name, bp->strm->msg);
2176 zlib_inflateEnd(bp->strm);
2178 if (rc == Z_STREAM_END)
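/* RV2P firmware is an array of 64-bit instructions.  Each pair of
 * 32-bit words is staged through the INSTR_HIGH/INSTR_LOW registers and
 * committed to instruction RAM at index i/8 with a PROC1 or PROC2
 * address/command write; the processor is then held in reset until the
 * rest of initialization un-stalls it.
 */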
2185 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2192 for (i = 0; i < rv2p_code_len; i += 8) {
2193 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2195 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2198 if (rv2p_proc == RV2P_PROC1) {
2199 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2200 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2203 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2204 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2208 /* Reset the processor; un-stall is done later. */
2209 if (rv2p_proc == RV2P_PROC1) {
2210 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2213 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
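/* Download one on-chip CPU image: the CPU is halted, the text, data,
 * sbss, bss and rodata sections are written into its scratchpad through
 * the indirect register window, the program counter is pointed at the
 * entry address, and finally the halt bit is cleared to start it.
 */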
2218 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2224 val = REG_RD_IND(bp, cpu_reg->mode);
2225 val |= cpu_reg->mode_value_halt;
2226 REG_WR_IND(bp, cpu_reg->mode, val);
2227 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2229 /* Load the Text area. */
2230 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2234 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2235 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2239 /* Load the Data area. */
2240 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2244 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2245 REG_WR_IND(bp, offset, fw->data[j]);
2249 /* Load the SBSS area. */
2250 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2254 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2255 REG_WR_IND(bp, offset, fw->sbss[j]);
2259 /* Load the BSS area. */
2260 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2264 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2265 REG_WR_IND(bp, offset, fw->bss[j]);
2269 /* Load the Read-Only area. */
2270 offset = cpu_reg->spad_base +
2271 (fw->rodata_addr - cpu_reg->mips_view_base);
2275 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2276 REG_WR_IND(bp, offset, fw->rodata[j]);
2280 /* Clear the pre-fetch instruction. */
2281 REG_WR_IND(bp, cpu_reg->inst, 0);
2282 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2284 /* Start the CPU. */
2285 val = REG_RD_IND(bp, cpu_reg->mode);
2286 val &= ~cpu_reg->mode_value_halt;
2287 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2288 REG_WR_IND(bp, cpu_reg->mode, val);
2292 bnx2_init_cpus(struct bnx2 *bp)
2294 struct cpu_reg cpu_reg;
2300 if ((rc = bnx2_gunzip_init(bp)) != 0)
2303 /* Initialize the RV2P processor. */
2304 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2309 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2311 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2316 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2318 /* Initialize the RX Processor. */
2319 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2320 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2321 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2322 cpu_reg.state = BNX2_RXP_CPU_STATE;
2323 cpu_reg.state_value_clear = 0xffffff;
2324 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2325 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2326 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2327 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2328 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2329 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2330 cpu_reg.mips_view_base = 0x8000000;
2332 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2333 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2334 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2335 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2337 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2338 fw.text_len = bnx2_RXP_b06FwTextLen;
2341 rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
2348 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2349 fw.data_len = bnx2_RXP_b06FwDataLen;
2351 fw.data = bnx2_RXP_b06FwData;
2353 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2354 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2356 fw.sbss = bnx2_RXP_b06FwSbss;
2358 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2359 fw.bss_len = bnx2_RXP_b06FwBssLen;
2361 fw.bss = bnx2_RXP_b06FwBss;
2363 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2364 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2365 fw.rodata_index = 0;
2366 fw.rodata = bnx2_RXP_b06FwRodata;
2368 load_cpu_fw(bp, &cpu_reg, &fw);
2370 /* Initialize the TX Processor. */
2371 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2372 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2373 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2374 cpu_reg.state = BNX2_TXP_CPU_STATE;
2375 cpu_reg.state_value_clear = 0xffffff;
2376 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2377 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2378 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2379 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2380 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2381 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2382 cpu_reg.mips_view_base = 0x8000000;
2384 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2385 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2386 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2387 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2389 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2390 fw.text_len = bnx2_TXP_b06FwTextLen;
2393 rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
2400 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2401 fw.data_len = bnx2_TXP_b06FwDataLen;
2403 fw.data = bnx2_TXP_b06FwData;
2405 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2406 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2408 fw.sbss = bnx2_TXP_b06FwSbss;
2410 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2411 fw.bss_len = bnx2_TXP_b06FwBssLen;
2413 fw.bss = bnx2_TXP_b06FwBss;
2415 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2416 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2417 fw.rodata_index = 0;
2418 fw.rodata = bnx2_TXP_b06FwRodata;
2420 load_cpu_fw(bp, &cpu_reg, &fw);
2422 /* Initialize the TX Patch-up Processor. */
2423 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2424 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2425 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2426 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2427 cpu_reg.state_value_clear = 0xffffff;
2428 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2429 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2430 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2431 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2432 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2433 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2434 cpu_reg.mips_view_base = 0x8000000;
2436 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2437 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2438 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2439 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2441 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2442 fw.text_len = bnx2_TPAT_b06FwTextLen;
2445 rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
2452 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2453 fw.data_len = bnx2_TPAT_b06FwDataLen;
2455 fw.data = bnx2_TPAT_b06FwData;
2457 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2458 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2460 fw.sbss = bnx2_TPAT_b06FwSbss;
2462 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2463 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2465 fw.bss = bnx2_TPAT_b06FwBss;
2467 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2468 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2469 fw.rodata_index = 0;
2470 fw.rodata = bnx2_TPAT_b06FwRodata;
2472 load_cpu_fw(bp, &cpu_reg, &fw);
2474 /* Initialize the Completion Processor. */
2475 cpu_reg.mode = BNX2_COM_CPU_MODE;
2476 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2477 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2478 cpu_reg.state = BNX2_COM_CPU_STATE;
2479 cpu_reg.state_value_clear = 0xffffff;
2480 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2481 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2482 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2483 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2484 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2485 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2486 cpu_reg.mips_view_base = 0x8000000;
2488 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2489 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2490 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2491 fw.start_addr = bnx2_COM_b06FwStartAddr;
2493 fw.text_addr = bnx2_COM_b06FwTextAddr;
2494 fw.text_len = bnx2_COM_b06FwTextLen;
2497 rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
2504 fw.data_addr = bnx2_COM_b06FwDataAddr;
2505 fw.data_len = bnx2_COM_b06FwDataLen;
2507 fw.data = bnx2_COM_b06FwData;
2509 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2510 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2512 fw.sbss = bnx2_COM_b06FwSbss;
2514 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2515 fw.bss_len = bnx2_COM_b06FwBssLen;
2517 fw.bss = bnx2_COM_b06FwBss;
2519 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2520 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2521 fw.rodata_index = 0;
2522 fw.rodata = bnx2_COM_b06FwRodata;
2524 load_cpu_fw(bp, &cpu_reg, &fw);
2527 bnx2_gunzip_end(bp);
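/* Editor's note on the power-state helper below: it moves the chip
 * between D0 and D3hot.  On the way down the EMAC can be left running
 * in a low-power mode so magic-packet and ACPI wake-up frames are still
 * received; on the way up only PME status needs clearing, plus the
 * mandatory delay out of D3hot. */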
2532 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2536 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2542 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2543 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2544 PCI_PM_CTRL_PME_STATUS);
2546 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2547 	/* delay required during transition out of D3hot */
2548 	msleep(20);
2550 val = REG_RD(bp, BNX2_EMAC_MODE);
2551 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2552 val &= ~BNX2_EMAC_MODE_MPKT;
2553 REG_WR(bp, BNX2_EMAC_MODE, val);
2555 val = REG_RD(bp, BNX2_RPM_CONFIG);
2556 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2557 REG_WR(bp, BNX2_RPM_CONFIG, val);
2568 autoneg = bp->autoneg;
2569 advertising = bp->advertising;
2571 bp->autoneg = AUTONEG_SPEED;
2572 bp->advertising = ADVERTISED_10baseT_Half |
2573 ADVERTISED_10baseT_Full |
2574 ADVERTISED_100baseT_Half |
2575 ADVERTISED_100baseT_Full |
2578 bnx2_setup_copper_phy(bp);
2580 bp->autoneg = autoneg;
2581 bp->advertising = advertising;
2583 bnx2_set_mac_addr(bp);
2585 val = REG_RD(bp, BNX2_EMAC_MODE);
2587 /* Enable port mode. */
2588 val &= ~BNX2_EMAC_MODE_PORT;
2589 val |= BNX2_EMAC_MODE_PORT_MII |
2590 BNX2_EMAC_MODE_MPKT_RCVD |
2591 BNX2_EMAC_MODE_ACPI_RCVD |
2592 BNX2_EMAC_MODE_MPKT;
2594 REG_WR(bp, BNX2_EMAC_MODE, val);
2596 /* receive all multicast */
2597 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2598 	REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2599 		0xffffffff);
2600 	}
2601 REG_WR(bp, BNX2_EMAC_RX_MODE,
2602 BNX2_EMAC_RX_MODE_SORT_MODE);
2604 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2605 BNX2_RPM_SORT_USER0_MC_EN;
2606 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2607 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2608 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2609 BNX2_RPM_SORT_USER0_ENA);
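/* Note the three-step programming of the sort-user register above:
 * clear it, write the broadcast/multicast match bits, then rewrite the
 * same value with the enable bit set, so the filter only goes live
 * once it is fully configured. */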
2611 /* Need to enable EMAC and RPM for WOL. */
2612 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2613 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2614 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2615 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2617 val = REG_RD(bp, BNX2_RPM_CONFIG);
2618 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2619 REG_WR(bp, BNX2_RPM_CONFIG, val);
2621 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2624 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2627 if (!(bp->flags & NO_WOL_FLAG))
2628 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2630 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2631 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2632 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2641 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2643 	pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2644 		pmcsr);
2646 /* No more memory access after this point until
2647 * device is brought back to D0.
2659 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2664 /* Request access to the flash interface. */
2665 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2666 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2667 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2668 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2674 if (j >= NVRAM_TIMEOUT_COUNT)
2681 bnx2_release_nvram_lock(struct bnx2 *bp)
2686 /* Relinquish nvram interface. */
2687 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2689 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2690 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2691 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2697 if (j >= NVRAM_TIMEOUT_COUNT)
2705 bnx2_enable_nvram_write(struct bnx2 *bp)
2709 val = REG_RD(bp, BNX2_MISC_CFG);
2710 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2712 if (!bp->flash_info->buffered) {
2715 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2716 REG_WR(bp, BNX2_NVM_COMMAND,
2717 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2719 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2722 val = REG_RD(bp, BNX2_NVM_COMMAND);
2723 if (val & BNX2_NVM_COMMAND_DONE)
2727 if (j >= NVRAM_TIMEOUT_COUNT)
2734 bnx2_disable_nvram_write(struct bnx2 *bp)
2738 val = REG_RD(bp, BNX2_MISC_CFG);
2739 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2744 bnx2_enable_nvram_access(struct bnx2 *bp)
2748 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2749 /* Enable both bits, even on read. */
2750 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2751 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2755 bnx2_disable_nvram_access(struct bnx2 *bp)
2759 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2760 /* Disable both bits, even after read. */
2761 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2762 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2763 BNX2_NVM_ACCESS_ENABLE_WR_EN));
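/* The NVRAM primitives that follow all share one recipe: clear the
 * DONE bit, program the address (and data for writes), issue the
 * command, then poll DONE for up to NVRAM_TIMEOUT_COUNT iterations. */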
2767 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2772 if (bp->flash_info->buffered)
2773 /* Buffered flash, no erase needed */
2776 /* Build an erase command */
2777 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2778 BNX2_NVM_COMMAND_DOIT;
2780 /* Need to clear DONE bit separately. */
2781 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2783 	/* Address of the NVRAM page to erase. */
2784 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2786 /* Issue an erase command. */
2787 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2789 /* Wait for completion. */
2790 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2795 val = REG_RD(bp, BNX2_NVM_COMMAND);
2796 if (val & BNX2_NVM_COMMAND_DONE)
2800 if (j >= NVRAM_TIMEOUT_COUNT)
2807 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2812 /* Build the command word. */
2813 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2815 /* Calculate an offset of a buffered flash. */
2816 if (bp->flash_info->buffered) {
2817 offset = ((offset / bp->flash_info->page_size) <<
2818 bp->flash_info->page_bits) +
2819 (offset % bp->flash_info->page_size);
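/* Illustrative example (page size and page bits vary by flash part):
 * with 264-byte pages and 9 page-address bits, linear offset 1000
 * becomes ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744,
 * i.e. the page number lands in the high bits and the byte-within-page
 * in the low bits, as buffered parts expect. */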
2822 /* Need to clear DONE bit separately. */
2823 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2825 /* Address of the NVRAM to read from. */
2826 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2828 /* Issue a read command. */
2829 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2831 /* Wait for completion. */
2832 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2837 val = REG_RD(bp, BNX2_NVM_COMMAND);
2838 if (val & BNX2_NVM_COMMAND_DONE) {
2839 val = REG_RD(bp, BNX2_NVM_READ);
2841 val = be32_to_cpu(val);
2842 memcpy(ret_val, &val, 4);
2846 if (j >= NVRAM_TIMEOUT_COUNT)
2854 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2859 /* Build the command word. */
2860 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2862 /* Calculate an offset of a buffered flash. */
2863 if (bp->flash_info->buffered) {
2864 offset = ((offset / bp->flash_info->page_size) <<
2865 bp->flash_info->page_bits) +
2866 (offset % bp->flash_info->page_size);
2869 /* Need to clear DONE bit separately. */
2870 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2872 memcpy(&val32, val, 4);
2873 val32 = cpu_to_be32(val32);
2875 /* Write the data. */
2876 REG_WR(bp, BNX2_NVM_WRITE, val32);
2878 /* Address of the NVRAM to write to. */
2879 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2881 /* Issue the write command. */
2882 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2884 /* Wait for completion. */
2885 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2888 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2891 if (j >= NVRAM_TIMEOUT_COUNT)
2898 bnx2_init_nvram(struct bnx2 *bp)
2901 int j, entry_count, rc;
2902 struct flash_spec *flash;
2904 /* Determine the selected interface. */
2905 val = REG_RD(bp, BNX2_NVM_CFG1);
2907 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2910 if (val & 0x40000000) {
2912 /* Flash interface has been reconfigured */
2913 for (j = 0, flash = &flash_table[0]; j < entry_count;
2915 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2916 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2917 bp->flash_info = flash;
2924 	/* Flash interface has not yet been reconfigured */
2926 if (val & (1 << 23))
2927 mask = FLASH_BACKUP_STRAP_MASK;
2929 mask = FLASH_STRAP_MASK;
2931 for (j = 0, flash = &flash_table[0]; j < entry_count;
2934 if ((val & mask) == (flash->strapping & mask)) {
2935 bp->flash_info = flash;
2937 /* Request access to the flash interface. */
2938 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2941 /* Enable access to flash interface */
2942 bnx2_enable_nvram_access(bp);
2944 /* Reconfigure the flash interface */
2945 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2946 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2947 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2948 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2950 /* Disable access to flash interface */
2951 bnx2_disable_nvram_access(bp);
2952 bnx2_release_nvram_lock(bp);
2957 } /* if (val & 0x40000000) */
2959 if (j == entry_count) {
2960 bp->flash_info = NULL;
2961 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2965 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2966 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2968 bp->flash_size = val;
2970 bp->flash_size = bp->flash_info->total_size;
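/* bnx2_nvram_read() below copes with callers that are not dword
 * aligned by splitting the transfer into a misaligned head, an aligned
 * middle, and a misaligned tail, bouncing the partial dwords through a
 * small stack buffer. */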
2976 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2980 u32 cmd_flags, offset32, len32, extra;
2985 /* Request access to the flash interface. */
2986 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2989 /* Enable access to flash interface */
2990 bnx2_enable_nvram_access(bp);
3003 pre_len = 4 - (offset & 3);
3005 if (pre_len >= len32) {
3007 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3008 BNX2_NVM_COMMAND_LAST;
3011 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3014 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3019 memcpy(ret_buf, buf + (offset & 3), pre_len);
3026 extra = 4 - (len32 & 3);
3027 len32 = (len32 + 4) & ~3;
3034 cmd_flags = BNX2_NVM_COMMAND_LAST;
3036 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3037 BNX2_NVM_COMMAND_LAST;
3039 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3041 memcpy(ret_buf, buf, 4 - extra);
3043 else if (len32 > 0) {
3046 /* Read the first word. */
3050 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3052 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3054 /* Advance to the next dword. */
3059 while (len32 > 4 && rc == 0) {
3060 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3062 /* Advance to the next dword. */
3071 cmd_flags = BNX2_NVM_COMMAND_LAST;
3072 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3074 memcpy(ret_buf, buf, 4 - extra);
3077 /* Disable access to flash interface */
3078 bnx2_disable_nvram_access(bp);
3080 bnx2_release_nvram_lock(bp);
3086 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3089 u32 written, offset32, len32;
3090 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3092 int align_start, align_end;
3097 align_start = align_end = 0;
3099 if ((align_start = (offset32 & 3))) {
3101 len32 += align_start;
3102 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3107 if ((len32 > 4) || !align_start) {
3108 align_end = 4 - (len32 & 3);
3110 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3111 			end, 4)))
3112 			return rc;
3117 if (align_start || align_end) {
3118 buf = kmalloc(len32, GFP_KERNEL);
3122 memcpy(buf, start, 4);
3125 memcpy(buf + len32 - 4, end, 4);
3127 memcpy(buf + align_start, data_buf, buf_size);
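/* buf now holds the caller's data padded out to dword boundaries with
 * the original NVRAM contents, so the page loop below can always issue
 * whole-dword writes. */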
3130 if (bp->flash_info->buffered == 0) {
3131 flash_buffer = kmalloc(264, GFP_KERNEL);
3132 if (flash_buffer == NULL) {
3134 goto nvram_write_end;
3139 while ((written < len32) && (rc == 0)) {
3140 u32 page_start, page_end, data_start, data_end;
3141 u32 addr, cmd_flags;
3144 /* Find the page_start addr */
3145 page_start = offset32 + written;
3146 page_start -= (page_start % bp->flash_info->page_size);
3147 /* Find the page_end addr */
3148 page_end = page_start + bp->flash_info->page_size;
3149 /* Find the data_start addr */
3150 data_start = (written == 0) ? offset32 : page_start;
3151 /* Find the data_end addr */
3152 data_end = (page_end > offset32 + len32) ?
3153 (offset32 + len32) : page_end;
3155 /* Request access to the flash interface. */
3156 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3157 goto nvram_write_end;
3159 /* Enable access to flash interface */
3160 bnx2_enable_nvram_access(bp);
3162 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3163 if (bp->flash_info->buffered == 0) {
3166 	/* Read the whole page into the buffer
3167 	 * (non-buffered flash only) */
3168 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3169 if (j == (bp->flash_info->page_size - 4)) {
3170 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3172 	rc = bnx2_nvram_read_dword(bp,
3173 		page_start + j,
3174 		&flash_buffer[j],
3175 		cmd_flags);
3177 	if (rc)
3178 		goto nvram_write_end;
3184 /* Enable writes to flash interface (unlock write-protect) */
3185 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3186 goto nvram_write_end;
3188 /* Erase the page */
3189 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3190 goto nvram_write_end;
3192 	/* Re-enable write access for the actual write. */
3193 bnx2_enable_nvram_write(bp);
3195 	/* Loop to write back the buffer data from page_start to
3196 	 * data_start */
3198 if (bp->flash_info->buffered == 0) {
3199 for (addr = page_start; addr < data_start;
3200 addr += 4, i += 4) {
3202 rc = bnx2_nvram_write_dword(bp, addr,
3203 &flash_buffer[i], cmd_flags);
3206 goto nvram_write_end;
3212 /* Loop to write the new data from data_start to data_end */
3213 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3214 if ((addr == page_end - 4) ||
3215 ((bp->flash_info->buffered) &&
3216 (addr == data_end - 4))) {
3218 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3220 	rc = bnx2_nvram_write_dword(bp, addr, buf,
3221 		cmd_flags);
3223 	if (rc != 0)
3224 		goto nvram_write_end;
3230 	/* Loop to write back the buffer data from data_end
3231 	 * to page_end */
3232 if (bp->flash_info->buffered == 0) {
3233 for (addr = data_end; addr < page_end;
3234 addr += 4, i += 4) {
3236 if (addr == page_end-4) {
3237 cmd_flags = BNX2_NVM_COMMAND_LAST;
3239 rc = bnx2_nvram_write_dword(bp, addr,
3240 &flash_buffer[i], cmd_flags);
3243 goto nvram_write_end;
3249 /* Disable writes to flash interface (lock write-protect) */
3250 bnx2_disable_nvram_write(bp);
3252 /* Disable access to flash interface */
3253 bnx2_disable_nvram_access(bp);
3254 bnx2_release_nvram_lock(bp);
3256 /* Increment written */
3257 written += data_end - data_start;
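/* Illustrative walk-through (assuming a 256-byte page): for offset32
 * 0x1f0 and len32 0x20, the first pass writes 0x1f0-0x1ff of the page
 * starting at 0x100 and the second writes 0x200-0x20f of the next
 * page, with 'written' advancing by 16 bytes each iteration. */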
3261 if (bp->flash_info->buffered == 0)
3262 kfree(flash_buffer);
3264 if (align_start || align_end)
3270 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3275 /* Wait for the current PCI transaction to complete before
3276 * issuing a reset. */
3277 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3278 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3279 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3280 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3281 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3282 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3285 /* Wait for the firmware to tell us it is ok to issue a reset. */
3286 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3288 /* Deposit a driver reset signature so the firmware knows that
3289 * this is a soft reset. */
3290 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3291 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3293 	/* Do a dummy read to force the chip to complete all current
3294 	 * transactions before we issue a reset. */
3295 val = REG_RD(bp, BNX2_MISC_ID);
3297 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3298 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3299 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3302 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3304 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3305 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3308 	/* Reset takes approximately 30 usec */
3309 for (i = 0; i < 10; i++) {
3310 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3311 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3312 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3318 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3319 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3320 printk(KERN_ERR PFX "Chip reset did not complete\n");
3324 /* Make sure byte swapping is properly configured. */
3325 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3326 if (val != 0x01020304) {
3327 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3331 /* Wait for the firmware to finish its initialization. */
3332 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3336 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3337 	/* Adjust the voltage regulator two steps lower.  The default
3338 	 * value of this register is 0x0000000e. */
3339 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3341 /* Remove bad rbuf memory from the free pool. */
3342 rc = bnx2_alloc_bad_rbuf(bp);
3349 bnx2_init_chip(struct bnx2 *bp)
3354 /* Make sure the interrupt is not active. */
3355 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3357 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3358 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3360 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3362 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3363 DMA_READ_CHANS << 12 |
3364 DMA_WRITE_CHANS << 16;
3366 val |= (0x2 << 20) | (1 << 11);
3368 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3371 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3372 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3373 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3375 REG_WR(bp, BNX2_DMA_CONFIG, val);
3377 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3378 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3379 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3380 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3383 if (bp->flags & PCIX_FLAG) {
3386 	pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3387 		&val16);
3388 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3389 val16 & ~PCI_X_CMD_ERO);
3392 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3393 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3394 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3395 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3397 /* Initialize context mapping and zero out the quick contexts. The
3398 * context block must have already been enabled. */
3399 bnx2_init_context(bp);
3401 if ((rc = bnx2_init_cpus(bp)) != 0)
3404 bnx2_init_nvram(bp);
3406 bnx2_set_mac_addr(bp);
3408 val = REG_RD(bp, BNX2_MQ_CONFIG);
3409 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3410 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3411 REG_WR(bp, BNX2_MQ_CONFIG, val);
3413 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3414 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3415 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3417 val = (BCM_PAGE_BITS - 8) << 24;
3418 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3420 /* Configure page size. */
3421 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3422 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3423 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3424 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3426 val = bp->mac_addr[0] +
3427 (bp->mac_addr[1] << 8) +
3428 (bp->mac_addr[2] << 16) +
3429 	bp->mac_addr[3] +
3430 	(bp->mac_addr[4] << 8) +
3431 (bp->mac_addr[5] << 16);
3432 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3434 /* Program the MTU. Also include 4 bytes for CRC32. */
3435 val = bp->dev->mtu + ETH_HLEN + 4;
3436 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3437 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3438 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
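/* Example: the default 1500-byte MTU gives 1500 + 14 + 4 = 1518, which
 * still fits a standard frame, so the jumbo enable bit stays clear; a
 * 9000-byte MTU would set it. */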
3440 bp->last_status_idx = 0;
3441 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3443 /* Set up how to generate a link change interrupt. */
3444 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3446 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3447 (u64) bp->status_blk_mapping & 0xffffffff);
3448 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3450 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3451 (u64) bp->stats_blk_mapping & 0xffffffff);
3452 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3453 (u64) bp->stats_blk_mapping >> 32);
3455 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3456 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3458 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3459 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3461 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3462 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3464 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3466 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3468 REG_WR(bp, BNX2_HC_COM_TICKS,
3469 (bp->com_ticks_int << 16) | bp->com_ticks);
3471 REG_WR(bp, BNX2_HC_CMD_TICKS,
3472 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3474 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3475 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
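/* Each host-coalescing register above packs the value used while an
 * interrupt is pending into the upper 16 bits and the normal value
 * into the lower 16 bits, hence the (x_int << 16) | x writes. */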
3477 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3478 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3480 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3481 BNX2_HC_CONFIG_TX_TMR_MODE |
3482 BNX2_HC_CONFIG_COLLECT_STATS);
3485 /* Clear internal stats counters. */
3486 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3488 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3490 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3491 BNX2_PORT_FEATURE_ASF_ENABLED)
3492 bp->flags |= ASF_ENABLE_FLAG;
3494 /* Initialize the receive filter. */
3495 bnx2_set_rx_mode(bp->dev);
3497 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3500 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3501 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3505 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3512 bnx2_init_tx_ring(struct bnx2 *bp)
3517 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3519 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3521 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3522 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3527 bp->tx_prod_bseq = 0;
3529 val = BNX2_L2CTX_TYPE_TYPE_L2;
3530 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3531 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3533 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3535 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3537 val = (u64) bp->tx_desc_mapping >> 32;
3538 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3540 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3541 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3545 bnx2_init_rx_ring(struct bnx2 *bp)
3549 u16 prod, ring_prod;
3552 /* 8 for CRC and VLAN */
3553 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3554 /* 8 for alignment */
3555 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3557 ring_prod = prod = bp->rx_prod = 0;
3560 bp->rx_prod_bseq = 0;
3562 for (i = 0; i < bp->rx_max_ring; i++) {
3565 rxbd = &bp->rx_desc_ring[i][0];
3566 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3567 rxbd->rx_bd_len = bp->rx_buf_use_size;
3568 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3570 if (i == (bp->rx_max_ring - 1))
3574 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3575 	rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3576 		0xffffffff;
3577 	}
3579 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3580 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3582 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3584 val = (u64) bp->rx_desc_mapping[0] >> 32;
3585 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3587 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3588 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3590 for (i = 0; i < bp->rx_ring_size; i++) {
3591 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3594 prod = NEXT_RX_BD(prod);
3595 ring_prod = RX_RING_IDX(prod);
3599 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3601 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3605 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3609 bp->rx_ring_size = size;
3611 while (size > MAX_RX_DESC_CNT) {
3612 size -= MAX_RX_DESC_CNT;
3615 /* round to next power of 2 */
3617 while ((max & num_rings) == 0)
3620 if (num_rings != max)
3623 bp->rx_max_ring = max;
3624 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
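/* Example (assuming 256 descriptors per ring page, so MAX_RX_DESC_CNT
 * is 255): a requested size of 1000 spans four pages; four is already
 * a power of two, so rx_max_ring = 4 and rx_max_ring_idx = 1023. */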
3628 bnx2_free_tx_skbs(struct bnx2 *bp)
3632 if (bp->tx_buf_ring == NULL)
3635 for (i = 0; i < TX_DESC_CNT; ) {
3636 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3637 struct sk_buff *skb = tx_buf->skb;
3645 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3646 skb_headlen(skb), PCI_DMA_TODEVICE);
3650 last = skb_shinfo(skb)->nr_frags;
3651 for (j = 0; j < last; j++) {
3652 tx_buf = &bp->tx_buf_ring[i + j + 1];
3653 pci_unmap_page(bp->pdev,
3654 pci_unmap_addr(tx_buf, mapping),
3655 skb_shinfo(skb)->frags[j].size,
3665 bnx2_free_rx_skbs(struct bnx2 *bp)
3669 if (bp->rx_buf_ring == NULL)
3672 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3673 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3674 struct sk_buff *skb = rx_buf->skb;
3679 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3680 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3689 bnx2_free_skbs(struct bnx2 *bp)
3691 bnx2_free_tx_skbs(bp);
3692 bnx2_free_rx_skbs(bp);
3696 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3700 rc = bnx2_reset_chip(bp, reset_code);
3705 if ((rc = bnx2_init_chip(bp)) != 0)
3708 bnx2_init_tx_ring(bp);
3709 bnx2_init_rx_ring(bp);
3714 bnx2_init_nic(struct bnx2 *bp)
3718 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3721 spin_lock_bh(&bp->phy_lock);
3723 spin_unlock_bh(&bp->phy_lock);
3729 bnx2_test_registers(struct bnx2 *bp)
3733 static const struct {
3739 { 0x006c, 0, 0x00000000, 0x0000003f },
3740 { 0x0090, 0, 0xffffffff, 0x00000000 },
3741 { 0x0094, 0, 0x00000000, 0x00000000 },
3743 { 0x0404, 0, 0x00003f00, 0x00000000 },
3744 { 0x0418, 0, 0x00000000, 0xffffffff },
3745 { 0x041c, 0, 0x00000000, 0xffffffff },
3746 { 0x0420, 0, 0x00000000, 0x80ffffff },
3747 { 0x0424, 0, 0x00000000, 0x00000000 },
3748 { 0x0428, 0, 0x00000000, 0x00000001 },
3749 { 0x0450, 0, 0x00000000, 0x0000ffff },
3750 { 0x0454, 0, 0x00000000, 0xffffffff },
3751 { 0x0458, 0, 0x00000000, 0xffffffff },
3753 { 0x0808, 0, 0x00000000, 0xffffffff },
3754 { 0x0854, 0, 0x00000000, 0xffffffff },
3755 { 0x0868, 0, 0x00000000, 0x77777777 },
3756 { 0x086c, 0, 0x00000000, 0x77777777 },
3757 { 0x0870, 0, 0x00000000, 0x77777777 },
3758 { 0x0874, 0, 0x00000000, 0x77777777 },
3760 { 0x0c00, 0, 0x00000000, 0x00000001 },
3761 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3762 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3764 { 0x1000, 0, 0x00000000, 0x00000001 },
3765 { 0x1004, 0, 0x00000000, 0x000f0001 },
3767 { 0x1408, 0, 0x01c00800, 0x00000000 },
3768 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3769 { 0x14a8, 0, 0x00000000, 0x000001ff },
3770 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3771 { 0x14b0, 0, 0x00000002, 0x00000001 },
3772 { 0x14b8, 0, 0x00000000, 0x00000000 },
3773 { 0x14c0, 0, 0x00000000, 0x00000009 },
3774 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3775 { 0x14cc, 0, 0x00000000, 0x00000001 },
3776 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3778 { 0x1800, 0, 0x00000000, 0x00000001 },
3779 { 0x1804, 0, 0x00000000, 0x00000003 },
3781 { 0x2800, 0, 0x00000000, 0x00000001 },
3782 { 0x2804, 0, 0x00000000, 0x00003f01 },
3783 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3784 { 0x2810, 0, 0xffff0000, 0x00000000 },
3785 { 0x2814, 0, 0xffff0000, 0x00000000 },
3786 { 0x2818, 0, 0xffff0000, 0x00000000 },
3787 { 0x281c, 0, 0xffff0000, 0x00000000 },
3788 { 0x2834, 0, 0xffffffff, 0x00000000 },
3789 { 0x2840, 0, 0x00000000, 0xffffffff },
3790 { 0x2844, 0, 0x00000000, 0xffffffff },
3791 { 0x2848, 0, 0xffffffff, 0x00000000 },
3792 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3794 { 0x2c00, 0, 0x00000000, 0x00000011 },
3795 { 0x2c04, 0, 0x00000000, 0x00030007 },
3797 { 0x3c00, 0, 0x00000000, 0x00000001 },
3798 { 0x3c04, 0, 0x00000000, 0x00070000 },
3799 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3800 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3801 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3802 { 0x3c14, 0, 0x00000000, 0xffffffff },
3803 { 0x3c18, 0, 0x00000000, 0xffffffff },
3804 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3805 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3807 { 0x5004, 0, 0x00000000, 0x0000007f },
3808 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3809 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3811 { 0x5c00, 0, 0x00000000, 0x00000001 },
3812 { 0x5c04, 0, 0x00000000, 0x0003000f },
3813 { 0x5c08, 0, 0x00000003, 0x00000000 },
3814 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3815 { 0x5c10, 0, 0x00000000, 0xffffffff },
3816 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3817 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3818 { 0x5c88, 0, 0x00000000, 0x00077373 },
3819 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3821 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3822 { 0x680c, 0, 0xffffffff, 0x00000000 },
3823 { 0x6810, 0, 0xffffffff, 0x00000000 },
3824 { 0x6814, 0, 0xffffffff, 0x00000000 },
3825 { 0x6818, 0, 0xffffffff, 0x00000000 },
3826 { 0x681c, 0, 0xffffffff, 0x00000000 },
3827 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3828 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3829 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3830 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3831 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3832 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3833 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3834 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3835 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3836 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3837 { 0x684c, 0, 0xffffffff, 0x00000000 },
3838 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3839 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3840 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3841 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3842 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3843 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3845 { 0xffff, 0, 0x00000000, 0x00000000 },
3849 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3850 u32 offset, rw_mask, ro_mask, save_val, val;
3852 offset = (u32) reg_tbl[i].offset;
3853 rw_mask = reg_tbl[i].rw_mask;
3854 ro_mask = reg_tbl[i].ro_mask;
3856 save_val = readl(bp->regview + offset);
3858 writel(0, bp->regview + offset);
3860 val = readl(bp->regview + offset);
3861 if ((val & rw_mask) != 0) {
3865 if ((val & ro_mask) != (save_val & ro_mask)) {
3869 writel(0xffffffff, bp->regview + offset);
3871 val = readl(bp->regview + offset);
3872 if ((val & rw_mask) != rw_mask) {
3876 if ((val & ro_mask) != (save_val & ro_mask)) {
3880 writel(save_val, bp->regview + offset);
3884 writel(save_val, bp->regview + offset);
3892 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3894 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3895 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3898 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3901 for (offset = 0; offset < size; offset += 4) {
3903 REG_WR_IND(bp, start + offset, test_pattern[i]);
3905 if (REG_RD_IND(bp, start + offset) !=
3915 bnx2_test_memory(struct bnx2 *bp)
3919 static const struct {
3923 { 0x60000, 0x4000 },
3924 { 0xa0000, 0x3000 },
3925 { 0xe0000, 0x4000 },
3926 { 0x120000, 0x4000 },
3927 { 0x1a0000, 0x4000 },
3928 { 0x160000, 0x4000 },
3932 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3933 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3934 mem_tbl[i].len)) != 0) {
3942 #define BNX2_MAC_LOOPBACK 0
3943 #define BNX2_PHY_LOOPBACK 1
3946 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3948 unsigned int pkt_size, num_pkts, i;
3949 struct sk_buff *skb, *rx_skb;
3950 unsigned char *packet;
3951 u16 rx_start_idx, rx_idx;
3954 struct sw_bd *rx_buf;
3955 struct l2_fhdr *rx_hdr;
3958 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3959 bp->loopback = MAC_LOOPBACK;
3960 bnx2_set_mac_loopback(bp);
3962 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3963 bp->loopback = PHY_LOOPBACK;
3964 bnx2_set_phy_loopback(bp);
3970 skb = netdev_alloc_skb(bp->dev, pkt_size);
3973 packet = skb_put(skb, pkt_size);
3974 memcpy(packet, bp->mac_addr, 6);
3975 memset(packet + 6, 0x0, 8);
3976 for (i = 14; i < pkt_size; i++)
3977 packet[i] = (unsigned char) (i & 0xff);
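/* The test frame is addressed to ourselves: 6 bytes of our own MAC,
 * 8 zero bytes covering the source address and type fields, then an
 * incrementing byte pattern that the receive side verifies below. */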
3979 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3982 REG_WR(bp, BNX2_HC_COMMAND,
3983 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3985 REG_RD(bp, BNX2_HC_COMMAND);
3988 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3992 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3994 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3995 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3996 txbd->tx_bd_mss_nbytes = pkt_size;
3997 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4000 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4001 bp->tx_prod_bseq += pkt_size;
4003 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
4004 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4008 REG_WR(bp, BNX2_HC_COMMAND,
4009 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4011 REG_RD(bp, BNX2_HC_COMMAND);
4015 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4018 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4019 goto loopback_test_done;
4022 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4023 if (rx_idx != rx_start_idx + num_pkts) {
4024 goto loopback_test_done;
4027 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4028 rx_skb = rx_buf->skb;
4030 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4031 skb_reserve(rx_skb, bp->rx_offset);
4033 pci_dma_sync_single_for_cpu(bp->pdev,
4034 pci_unmap_addr(rx_buf, mapping),
4035 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4037 if (rx_hdr->l2_fhdr_status &
4038 (L2_FHDR_ERRORS_BAD_CRC |
4039 L2_FHDR_ERRORS_PHY_DECODE |
4040 L2_FHDR_ERRORS_ALIGNMENT |
4041 L2_FHDR_ERRORS_TOO_SHORT |
4042 L2_FHDR_ERRORS_GIANT_FRAME)) {
4044 goto loopback_test_done;
4047 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4048 goto loopback_test_done;
4051 for (i = 14; i < pkt_size; i++) {
4052 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4053 goto loopback_test_done;
4064 #define BNX2_MAC_LOOPBACK_FAILED 1
4065 #define BNX2_PHY_LOOPBACK_FAILED 2
4066 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4067 BNX2_PHY_LOOPBACK_FAILED)
4070 bnx2_test_loopback(struct bnx2 *bp)
4074 if (!netif_running(bp->dev))
4075 return BNX2_LOOPBACK_FAILED;
4077 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4078 spin_lock_bh(&bp->phy_lock);
4080 spin_unlock_bh(&bp->phy_lock);
4081 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4082 rc |= BNX2_MAC_LOOPBACK_FAILED;
4083 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4084 rc |= BNX2_PHY_LOOPBACK_FAILED;
4088 #define NVRAM_SIZE 0x200
4089 #define CRC32_RESIDUAL 0xdebb20e3
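/* 0xdebb20e3 is the familiar CRC32 residual: a data block with its
 * CRC appended checksums to this constant, which lets each half of the
 * NVRAM manufacturing data validate itself without locating the stored
 * CRC within it. */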
4092 bnx2_test_nvram(struct bnx2 *bp)
4094 u32 buf[NVRAM_SIZE / 4];
4095 u8 *data = (u8 *) buf;
4099 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4100 goto test_nvram_done;
4102 magic = be32_to_cpu(buf[0]);
4103 if (magic != 0x669955aa) {
4105 goto test_nvram_done;
4108 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4109 goto test_nvram_done;
4111 csum = ether_crc_le(0x100, data);
4112 if (csum != CRC32_RESIDUAL) {
4114 goto test_nvram_done;
4117 csum = ether_crc_le(0x100, data + 0x100);
4118 if (csum != CRC32_RESIDUAL) {
4127 bnx2_test_link(struct bnx2 *bp)
4131 spin_lock_bh(&bp->phy_lock);
4132 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4133 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4134 spin_unlock_bh(&bp->phy_lock);
4136 if (bmsr & BMSR_LSTATUS) {
4143 bnx2_test_intr(struct bnx2 *bp)
4148 if (!netif_running(bp->dev))
4151 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4153 /* This register is not touched during run-time. */
4154 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4155 REG_RD(bp, BNX2_HC_COMMAND);
4157 for (i = 0; i < 10; i++) {
4158 	if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4159 		status_idx) {
4161 		break;
4162 	}
4164 msleep_interruptible(10);
4173 bnx2_5706_serdes_timer(struct bnx2 *bp)
4175 spin_lock(&bp->phy_lock);
4176 if (bp->serdes_an_pending)
4177 bp->serdes_an_pending--;
4178 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4181 bp->current_interval = bp->timer_interval;
4183 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4185 if (bmcr & BMCR_ANENABLE) {
4188 bnx2_write_phy(bp, 0x1c, 0x7c00);
4189 bnx2_read_phy(bp, 0x1c, &phy1);
4191 bnx2_write_phy(bp, 0x17, 0x0f01);
4192 bnx2_read_phy(bp, 0x15, &phy2);
4193 bnx2_write_phy(bp, 0x17, 0x0f01);
4194 bnx2_read_phy(bp, 0x15, &phy2);
4196 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4197 !(phy2 & 0x20)) { /* no CONFIG */
4199 bmcr &= ~BMCR_ANENABLE;
4200 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4201 bnx2_write_phy(bp, MII_BMCR, bmcr);
4202 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4206 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4207 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4210 bnx2_write_phy(bp, 0x17, 0x0f01);
4211 bnx2_read_phy(bp, 0x15, &phy2);
4215 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4216 bmcr |= BMCR_ANENABLE;
4217 bnx2_write_phy(bp, MII_BMCR, bmcr);
4219 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4222 bp->current_interval = bp->timer_interval;
4224 spin_unlock(&bp->phy_lock);
4228 bnx2_5708_serdes_timer(struct bnx2 *bp)
4230 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4231 bp->serdes_an_pending = 0;
4235 spin_lock(&bp->phy_lock);
4236 if (bp->serdes_an_pending)
4237 bp->serdes_an_pending--;
4238 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4241 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4243 if (bmcr & BMCR_ANENABLE) {
4244 bmcr &= ~BMCR_ANENABLE;
4245 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4246 bnx2_write_phy(bp, MII_BMCR, bmcr);
4247 bp->current_interval = SERDES_FORCED_TIMEOUT;
4249 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4250 bmcr |= BMCR_ANENABLE;
4251 bnx2_write_phy(bp, MII_BMCR, bmcr);
4252 bp->serdes_an_pending = 2;
4253 bp->current_interval = bp->timer_interval;
4257 bp->current_interval = bp->timer_interval;
4259 spin_unlock(&bp->phy_lock);
4263 bnx2_timer(unsigned long data)
4265 struct bnx2 *bp = (struct bnx2 *) data;
4268 if (!netif_running(bp->dev))
4271 if (atomic_read(&bp->intr_sem) != 0)
4272 goto bnx2_restart_timer;
4274 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4275 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4277 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4279 if (bp->phy_flags & PHY_SERDES_FLAG) {
4280 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4281 bnx2_5706_serdes_timer(bp);
4282 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4283 bnx2_5708_serdes_timer(bp);
4287 mod_timer(&bp->timer, jiffies + bp->current_interval);
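/* The periodic timer doubles as a heartbeat: the driver pulse sequence
 * written to shared memory above lets the bootcode detect a dead
 * driver and reclaim the device. */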
4290 /* Called with rtnl_lock */
4292 bnx2_open(struct net_device *dev)
4294 struct bnx2 *bp = netdev_priv(dev);
4297 bnx2_set_power_state(bp, PCI_D0);
4298 bnx2_disable_int(bp);
4300 rc = bnx2_alloc_mem(bp);
4304 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4305 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4308 if (pci_enable_msi(bp->pdev) == 0) {
4309 bp->flags |= USING_MSI_FLAG;
4310 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4314 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4315 IRQF_SHARED, dev->name, dev);
4319 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4327 rc = bnx2_init_nic(bp);
4330 free_irq(bp->pdev->irq, dev);
4331 if (bp->flags & USING_MSI_FLAG) {
4332 pci_disable_msi(bp->pdev);
4333 bp->flags &= ~USING_MSI_FLAG;
4340 mod_timer(&bp->timer, jiffies + bp->current_interval);
4342 atomic_set(&bp->intr_sem, 0);
4344 bnx2_enable_int(bp);
4346 if (bp->flags & USING_MSI_FLAG) {
4347 /* Test MSI to make sure it is working
4348 * If MSI test fails, go back to INTx mode
4350 if (bnx2_test_intr(bp) != 0) {
4351 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4352 " using MSI, switching to INTx mode. Please"
4353 " report this failure to the PCI maintainer"
4354 " and include system chipset information.\n",
4357 bnx2_disable_int(bp);
4358 free_irq(bp->pdev->irq, dev);
4359 pci_disable_msi(bp->pdev);
4360 bp->flags &= ~USING_MSI_FLAG;
4362 rc = bnx2_init_nic(bp);
4365 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4366 IRQF_SHARED, dev->name, dev);
4371 del_timer_sync(&bp->timer);
4374 bnx2_enable_int(bp);
4377 if (bp->flags & USING_MSI_FLAG) {
4378 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4381 netif_start_queue(dev);
4387 bnx2_reset_task(void *data)
4389 struct bnx2 *bp = data;
4391 if (!netif_running(bp->dev))
4394 bp->in_reset_task = 1;
4395 bnx2_netif_stop(bp);
4399 atomic_set(&bp->intr_sem, 1);
4400 bnx2_netif_start(bp);
4401 bp->in_reset_task = 0;
4405 bnx2_tx_timeout(struct net_device *dev)
4407 struct bnx2 *bp = netdev_priv(dev);
4409 	/* This allows the netif to be shut down gracefully before resetting */
4410 schedule_work(&bp->reset_task);
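/* The actual reset is deferred to the workqueue because recovering the
 * device requires sleeping, which is not allowed in the watchdog
 * context that calls bnx2_tx_timeout(). */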
4414 /* Called with rtnl_lock */
4416 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4418 struct bnx2 *bp = netdev_priv(dev);
4420 bnx2_netif_stop(bp);
4423 bnx2_set_rx_mode(dev);
4425 bnx2_netif_start(bp);
4428 /* Called with rtnl_lock */
4430 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4432 struct bnx2 *bp = netdev_priv(dev);
4434 bnx2_netif_stop(bp);
4437 bp->vlgrp->vlan_devices[vid] = NULL;
4438 bnx2_set_rx_mode(dev);
4440 bnx2_netif_start(bp);
4444 /* Called with netif_tx_lock.
4445 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4446 * netif_wake_queue().
4449 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4451 struct bnx2 *bp = netdev_priv(dev);
4454 struct sw_bd *tx_buf;
4455 u32 len, vlan_tag_flags, last_frag, mss;
4456 u16 prod, ring_prod;
4459 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4460 netif_stop_queue(dev);
4461 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4464 return NETDEV_TX_BUSY;
4466 len = skb_headlen(skb);
4468 ring_prod = TX_RING_IDX(prod);
4471 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4472 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4475 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4476 		vlan_tag_flags |=
4477 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4480 if ((mss = skb_shinfo(skb)->gso_size) &&
4481 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4482 u32 tcp_opt_len, ip_tcp_len;
4484 if (skb_header_cloned(skb) &&
4485 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4487 return NETDEV_TX_OK;
4490 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4491 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4494 if (skb->h.th->doff > 5) {
4495 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4497 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4499 skb->nh.iph->check = 0;
4500 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4501 	skb->h.th->check =
4502 		~csum_tcpudp_magic(skb->nh.iph->saddr,
4503 			skb->nh.iph->daddr,
4504 			0, IPPROTO_TCP, 0);
4506 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4507 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4508 (tcp_opt_len >> 2)) << 8;
4517 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4519 tx_buf = &bp->tx_buf_ring[ring_prod];
4521 pci_unmap_addr_set(tx_buf, mapping, mapping);
4523 txbd = &bp->tx_desc_ring[ring_prod];
4525 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4526 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4527 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4528 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4530 last_frag = skb_shinfo(skb)->nr_frags;
4532 for (i = 0; i < last_frag; i++) {
4533 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4535 prod = NEXT_TX_BD(prod);
4536 ring_prod = TX_RING_IDX(prod);
4537 txbd = &bp->tx_desc_ring[ring_prod];
4540 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4541 len, PCI_DMA_TODEVICE);
4542 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4545 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4546 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4547 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4548 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4551 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4553 prod = NEXT_TX_BD(prod);
4554 bp->tx_prod_bseq += skb->len;
4556 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4557 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
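/* The TX doorbell is the pair of mailbox writes above: the new
 * producer index plus the running byte-sequence count, which advances
 * by the full length of every queued frame. */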
4562 dev->trans_start = jiffies;
4564 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4565 netif_stop_queue(dev);
4566 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4567 netif_wake_queue(dev);
4570 return NETDEV_TX_OK;
4573 /* Called with rtnl_lock */
4575 bnx2_close(struct net_device *dev)
4577 struct bnx2 *bp = netdev_priv(dev);
4580 	/* Calling flush_scheduled_work() may deadlock because
4581 	 * linkwatch_event() may be on the workqueue and will try to
4582 	 * acquire the rtnl_lock that we are holding.
4583 	 */
4584 while (bp->in_reset_task)
4587 bnx2_netif_stop(bp);
4588 del_timer_sync(&bp->timer);
4589 if (bp->flags & NO_WOL_FLAG)
4590 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4592 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4594 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4595 bnx2_reset_chip(bp, reset_code);
4596 free_irq(bp->pdev->irq, dev);
4597 if (bp->flags & USING_MSI_FLAG) {
4598 pci_disable_msi(bp->pdev);
4599 bp->flags &= ~USING_MSI_FLAG;
4604 netif_carrier_off(bp->dev);
4605 bnx2_set_power_state(bp, PCI_D3hot);
4609 #define GET_NET_STATS64(ctr) \
4610 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4611 (unsigned long) (ctr##_lo)
4613 #define GET_NET_STATS32(ctr) \
4614 	(ctr##_lo)
4616 #if (BITS_PER_LONG == 64)
4617 #define GET_NET_STATS GET_NET_STATS64
4619 #define GET_NET_STATS GET_NET_STATS32
4622 static struct net_device_stats *
4623 bnx2_get_stats(struct net_device *dev)
4625 struct bnx2 *bp = netdev_priv(dev);
4626 struct statistics_block *stats_blk = bp->stats_blk;
4627 struct net_device_stats *net_stats = &bp->net_stats;
4629 if (bp->stats_blk == NULL) {
4632 net_stats->rx_packets =
4633 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4634 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4635 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4637 net_stats->tx_packets =
4638 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4639 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4640 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4642 net_stats->rx_bytes =
4643 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4645 net_stats->tx_bytes =
4646 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4648 net_stats->multicast =
4649 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4651 net_stats->collisions =
4652 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4654 net_stats->rx_length_errors =
4655 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4656 stats_blk->stat_EtherStatsOverrsizePkts);
4658 net_stats->rx_over_errors =
4659 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4661 net_stats->rx_frame_errors =
4662 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4664 net_stats->rx_crc_errors =
4665 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4667 net_stats->rx_errors = net_stats->rx_length_errors +
4668 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4669 net_stats->rx_crc_errors;
4671 net_stats->tx_aborted_errors =
4672 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4673 stats_blk->stat_Dot3StatsLateCollisions);
4675 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4676 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4677 net_stats->tx_carrier_errors = 0;
4679 net_stats->tx_carrier_errors =
4680 		(unsigned long)
4681 		stats_blk->stat_Dot3StatsCarrierSenseErrors;
4684 net_stats->tx_errors =
4685 	(unsigned long)
4686 	stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4687 	+
4688 net_stats->tx_aborted_errors +
4689 net_stats->tx_carrier_errors;
4691 net_stats->rx_missed_errors =
4692 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4693 stats_blk->stat_FwRxDrop);
4698 /* All ethtool functions called with rtnl_lock */
4701 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4703 struct bnx2 *bp = netdev_priv(dev);
4705 cmd->supported = SUPPORTED_Autoneg;
4706 if (bp->phy_flags & PHY_SERDES_FLAG) {
4707 cmd->supported |= SUPPORTED_1000baseT_Full |
4710 cmd->port = PORT_FIBRE;
4713 cmd->supported |= SUPPORTED_10baseT_Half |
4714 SUPPORTED_10baseT_Full |
4715 SUPPORTED_100baseT_Half |
4716 SUPPORTED_100baseT_Full |
4717 SUPPORTED_1000baseT_Full |
4720 cmd->port = PORT_TP;
4723 cmd->advertising = bp->advertising;
4725 if (bp->autoneg & AUTONEG_SPEED) {
4726 cmd->autoneg = AUTONEG_ENABLE;
4729 cmd->autoneg = AUTONEG_DISABLE;
4732 if (netif_carrier_ok(dev)) {
4733 cmd->speed = bp->line_speed;
4734 cmd->duplex = bp->duplex;
4741 cmd->transceiver = XCVR_INTERNAL;
4742 cmd->phy_address = bp->phy_addr;
4748 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4750 struct bnx2 *bp = netdev_priv(dev);
4751 u8 autoneg = bp->autoneg;
4752 u8 req_duplex = bp->req_duplex;
4753 u16 req_line_speed = bp->req_line_speed;
4754 u32 advertising = bp->advertising;
4756 if (cmd->autoneg == AUTONEG_ENABLE) {
4757 autoneg |= AUTONEG_SPEED;
4759 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4761 /* allow advertising 1 speed */
4762 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4763 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4764 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4765 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4767 if (bp->phy_flags & PHY_SERDES_FLAG)
4770 advertising = cmd->advertising;
4773 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4774 advertising = cmd->advertising;
4776 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4780 if (bp->phy_flags & PHY_SERDES_FLAG) {
4781 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4784 advertising = ETHTOOL_ALL_COPPER_SPEED;
4787 advertising |= ADVERTISED_Autoneg;
4790 if (bp->phy_flags & PHY_SERDES_FLAG) {
4791 if ((cmd->speed != SPEED_1000 &&
4792 cmd->speed != SPEED_2500) ||
4793 (cmd->duplex != DUPLEX_FULL))
4796 if (cmd->speed == SPEED_2500 &&
4797 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4800 else if (cmd->speed == SPEED_1000) {
4803 autoneg &= ~AUTONEG_SPEED;
4804 req_line_speed = cmd->speed;
4805 req_duplex = cmd->duplex;
4809 bp->autoneg = autoneg;
4810 bp->advertising = advertising;
4811 bp->req_line_speed = req_line_speed;
4812 bp->req_duplex = req_duplex;
4814 spin_lock_bh(&bp->phy_lock);
4818 spin_unlock_bh(&bp->phy_lock);
4824 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4826 struct bnx2 *bp = netdev_priv(dev);
4828 strcpy(info->driver, DRV_MODULE_NAME);
4829 strcpy(info->version, DRV_MODULE_VERSION);
4830 strcpy(info->bus_info, pci_name(bp->pdev));
4831 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4832 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4833 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4834 info->fw_version[1] = info->fw_version[3] = '.';
4835 info->fw_version[5] = 0;
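/* Example: fw_ver 0x01020300 renders as "1.2.3" -- one byte per field
 * converted to a single ASCII digit, so this simple scheme assumes no
 * version field ever exceeds 9. */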
4838 #define BNX2_REGDUMP_LEN (32 * 1024)
4841 bnx2_get_regs_len(struct net_device *dev)
4843 return BNX2_REGDUMP_LEN;
4847 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4849 	u32 *p = _p, i, offset;
4850 	u8 *orig_p = _p;
4851 struct bnx2 *bp = netdev_priv(dev);
4852 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4853 0x0800, 0x0880, 0x0c00, 0x0c10,
4854 0x0c30, 0x0d08, 0x1000, 0x101c,
4855 0x1040, 0x1048, 0x1080, 0x10a4,
4856 0x1400, 0x1490, 0x1498, 0x14f0,
4857 0x1500, 0x155c, 0x1580, 0x15dc,
4858 0x1600, 0x1658, 0x1680, 0x16d8,
4859 0x1800, 0x1820, 0x1840, 0x1854,
4860 0x1880, 0x1894, 0x1900, 0x1984,
4861 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4862 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4863 0x2000, 0x2030, 0x23c0, 0x2400,
4864 0x2800, 0x2820, 0x2830, 0x2850,
4865 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4866 0x3c00, 0x3c94, 0x4000, 0x4010,
4867 0x4080, 0x4090, 0x43c0, 0x4458,
4868 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4869 0x4fc0, 0x5010, 0x53c0, 0x5444,
4870 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4871 0x5fc0, 0x6000, 0x6400, 0x6428,
4872 0x6800, 0x6848, 0x684c, 0x6860,
4873 0x6888, 0x6910, 0x8000 };
4877 memset(p, 0, BNX2_REGDUMP_LEN);
4879 if (!netif_running(bp->dev))
4883 offset = reg_boundaries[0];
4885 while (offset < BNX2_REGDUMP_LEN) {
4886 		*p++ = REG_RD(bp, offset);
4887 		offset += 4;
4888 if (offset == reg_boundaries[i + 1]) {
4889 offset = reg_boundaries[i + 2];
4890 p = (u32 *) (orig_p + offset);
4897 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4899 struct bnx2 *bp = netdev_priv(dev);
4901 if (bp->flags & NO_WOL_FLAG) {
4906 wol->supported = WAKE_MAGIC;
4908 wol->wolopts = WAKE_MAGIC;
4912 memset(&wol->sopass, 0, sizeof(wol->sopass));
4916 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4918 struct bnx2 *bp = netdev_priv(dev);
4920 if (wol->wolopts & ~WAKE_MAGIC)
4923 if (wol->wolopts & WAKE_MAGIC) {
4924 if (bp->flags & NO_WOL_FLAG)
4936 bnx2_nway_reset(struct net_device *dev)
4938 struct bnx2 *bp = netdev_priv(dev);
4941 if (!(bp->autoneg & AUTONEG_SPEED)) {
4945 spin_lock_bh(&bp->phy_lock);
4947 	/* Force a link-down event visible to the other side */
4948 if (bp->phy_flags & PHY_SERDES_FLAG) {
4949 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4950 spin_unlock_bh(&bp->phy_lock);
4954 spin_lock_bh(&bp->phy_lock);
4956 bp->current_interval = SERDES_AN_TIMEOUT;
4957 bp->serdes_an_pending = 1;
4958 mod_timer(&bp->timer, jiffies + bp->current_interval);
4961 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4962 bmcr &= ~BMCR_LOOPBACK;
4963 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4965 spin_unlock_bh(&bp->phy_lock);
4971 bnx2_get_eeprom_len(struct net_device *dev)
4973 struct bnx2 *bp = netdev_priv(dev);
4975 if (bp->flash_info == NULL)
4978 return (int) bp->flash_size;
4982 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4985 struct bnx2 *bp = netdev_priv(dev);
4988 /* parameters already validated in ethtool_get_eeprom */
4990 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4996 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4999 struct bnx2 *bp = netdev_priv(dev);
5002 /* parameters already validated in ethtool_set_eeprom */
5004 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5010 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5012 struct bnx2 *bp = netdev_priv(dev);
5014 memset(coal, 0, sizeof(struct ethtool_coalesce));
5016 coal->rx_coalesce_usecs = bp->rx_ticks;
5017 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5018 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5019 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5021 coal->tx_coalesce_usecs = bp->tx_ticks;
5022 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5023 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5024 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5026 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5032 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5034 struct bnx2 *bp = netdev_priv(dev);
5036 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5037 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5039 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5040 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5042 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5043 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5045 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5046 if (bp->rx_quick_cons_trip_int > 0xff)
5047 bp->rx_quick_cons_trip_int = 0xff;
5049 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5050 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5052 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5053 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5055 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5056 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5058 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5059 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5060 		0xff;
5062 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5063 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5064 bp->stats_ticks &= 0xffff00;
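/* stats_ticks is clamped and masked to 0xffff00 because the hardware
 * ignores the low 8 bits of the tick count (see the matching
 * BNX2_HC_STATS_TICKS write in bnx2_init_chip() above). */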
5066 if (netif_running(bp->dev)) {
5067 bnx2_netif_stop(bp);
5069 bnx2_netif_start(bp);
5076 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5078 struct bnx2 *bp = netdev_priv(dev);
5080 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5081 ering->rx_mini_max_pending = 0;
5082 ering->rx_jumbo_max_pending = 0;
5084 ering->rx_pending = bp->rx_ring_size;
5085 ering->rx_mini_pending = 0;
5086 ering->rx_jumbo_pending = 0;
5088 ering->tx_max_pending = MAX_TX_DESC_CNT;
5089 ering->tx_pending = bp->tx_ring_size;
5093 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5095 struct bnx2 *bp = netdev_priv(dev);
5097 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5098 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5099 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5103 if (netif_running(bp->dev)) {
5104 bnx2_netif_stop(bp);
5105 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5110 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5111 bp->tx_ring_size = ering->tx_pending;
5113 if (netif_running(bp->dev)) {
5116 rc = bnx2_alloc_mem(bp);
5120 bnx2_netif_start(bp);
5127 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5129 struct bnx2 *bp = netdev_priv(dev);
5131 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5132 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5133 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;

	spin_lock_bh(&bp->phy_lock);
	bnx2_setup_phy(bp);
	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
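/* The statistics block is accessed as an array of u32s, so the byte
 * offset from offsetof() is divided by 4 to yield a u32 index into the
 * hw_stats pointer used by bnx2_get_ethtool_stats() below.
 */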
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
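/* In the two length arrays above, 8 marks a 64-bit counter (a hi/lo u32
 * pair), 4 a 32-bit counter, and 0 a counter that must be skipped on
 * that chip revision; bnx2_get_ethtool_stats() dispatches on these
 * widths when copying counters out of the statistics block.
 */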
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: the high u32 sits at the offset, the
		 * low u32 immediately after it. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
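/* Each loop iteration above lasts ~500ms and alternates the LED between
 * override-with-no-segments (dark) and all-segments-forced-on, so the
 * "data" passed in by "ethtool -p" is roughly the blink duration in
 * seconds (two half-periods per second).
 */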
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
#endif
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		break;
	}
	return -EOPNOTSUPP;
}
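/* The reg_num & 0x1f masks above restrict access to the 32 registers
 * defined by the MII management interface.  SIOCGMIIPHY deliberately
 * falls through to SIOCGMIIREG, the conventional behavior for MII
 * ioctls, so a caller gets the PHY id and a register read in one call.
 */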
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
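/* poll_bnx2() backs netpoll clients such as netconsole: it briefly
 * masks the device IRQ and invokes the interrupt handler directly, so
 * packets can still be processed when normal interrupt delivery is
 * unavailable.
 */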
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;
	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
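	/* USING_DAC_FLAG is checked again in bnx2_init_one(), where it
	 * turns on NETIF_F_HIGHDMA so the stack may hand the driver
	 * buffers above 4GB without bounce buffering.
	 */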
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);
	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;
	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;
	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
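/* The error labels above unwind strictly in reverse order of
 * acquisition: err_out_unmap drops the ioremap()ed window,
 * err_out_release the PCI regions, and err_out_disable the device
 * itself, before the common err_out return.
 */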
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
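/* The (%c%d) in the probe banner decodes the silicon revision from
 * CHIP_ID: the top nibble maps 0 -> 'A', 1 -> 'B', and so on, and the
 * next byte is the numeric stepping, so, e.g., a 5706 A2 prints as
 * "(A2)".
 */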
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
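/* The reset code handed to bnx2_reset_chip() tells the bootcode why the
 * driver is going down; the WOL variants presumably let the firmware
 * keep the MAC configured for wake-up while the host sleeps.
 */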
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
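/* Typical usage when working around the AMD 8132 MSI issue described in
 * bnx2_init_board() (the module parameter is defined near the top of
 * this file):
 *
 *	modprobe bnx2 disable_msi=1
 */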