/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {

/* indexed by board_type, above */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

 * locking is done by mcp
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
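/* Usage sketch (illustration only, not part of the original file): the two
 * helpers above tunnel GRC register accesses through the PCI config window,
 * e.g.
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	val = bnx2x_reg_rd_ind(bp, addr);
 *
 * Restoring PCICFG_GRC_ADDRESS to PCICFG_VENDOR_ID_OFFSET afterwards keeps
 * stray config cycles from landing in the GRC window.
 */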
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
			DMAE_CMD_ENDIANITY_DW_SWAP |
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

			BNX2X_ERR("dmae timeout!\n");

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

	mutex_unlock(&bp->dmae_mutex);
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
			DMAE_CMD_ENDIANITY_DW_SWAP |
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {

			BNX2X_ERR("dmae timeout!\n");

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
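/* Usage sketch (illustration only; WB_REG is a stand-in, not a real register
 * name): wide-bus registers are 64 bits and must be moved as two 32-bit
 * words over DMAE, so callers pair the helpers like
 *
 *	bnx2x_wb_wr(bp, WB_REG, U64_HI(val), U64_LO(val));
 *	val = bnx2x_wb_rd(bp, WB_REG);
 */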
static int bnx2x_mc_assert(struct bnx2x *bp)
	u32 row0, row1, row2, row3;

	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);

	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
static void bnx2x_fw_dump(struct bnx2x *bp)
	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
		printk(KERN_CONT "%s", (char *)data);
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
		printk(KERN_CONT "%s", (char *)data);
	printk("\n" KERN_ERR PFX "end of fw dump\n");
static void bnx2x_panic_dump(struct bnx2x *bp)
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");
static void bnx2x_int_enable(struct bnx2x *bp)
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			/* enable nig attention */

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
static void bnx2x_int_disable(struct bnx2x *bp)
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);

	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);

		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
 * General service functions

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
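/* Packing example (illustrative, not from the original source): acking
 * index 7 of fastpath SB 2 in USTORM with op IGU_INT_ENABLE is this one
 * 32-bit write; sb_id, storm, update and op are simply ORed into
 * sb_id_and_flags by the shifts above.
 */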
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;

static u16 bnx2x_ack_int(struct bnx2x *bp)
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	tx_buf->first_bd = 0;
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries;
	   it is used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);

	return (s16)(fp->bp->tx_ring_size) - used;
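/* Worked example (illustrative, not from the original source): prod and
 * cons are free-running 16-bit counters, so SUB_S16() stays correct across
 * wrap: with prod = 5 just after a wrap and cons = 0xfffb,
 * SUB_S16(5, 0xfffb) = 10, giving used = 10 + NUM_TX_RINGS; the
 * NUM_TX_RINGS bias reserves the "next-page" BDs that never carry packets.
 */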
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
		      BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			fp->state = BNX2X_FP_STATE_OPEN;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			fp->state = BNX2X_FP_STATE_HALTED;

			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);

		mb(); /* force bnx2x_wait_ramrod() to see the change */

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");

		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",

	mb(); /* force bnx2x_wait_ramrod() to see the change */
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	if (unlikely(page == NULL))

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {

	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
	u16 last_max, last_elem, first_elem;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;

		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the last two indices in each page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
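/* Bookkeeping note (derived from the helpers above, illustration only):
 * each u64 in sge_mask tracks 64 SGEs, with 1 meaning "not yet consumed".
 * Pre-clearing only the two "next page" slots per page lets
 * bnx2x_update_sge_prod() treat "mask element == 0" as "page fully
 * consumed" without needing a per-page comparison pattern.
 */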
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));
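	/* Worked example (illustrative numbers, not from the original
	 * source): with 4K SGE pages, a 9000-byte aggregated frame with
	 * len_on_bd = 1500 leaves frag_size = 7500, so gso_size =
	 * min(4096, max(7500, 1500)) = 4096, a stand-in MSS that lets the
	 * stack re-segment the aggregated skb when forwarding.
	 */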
#ifdef BNX2X_STOP_ON_ERROR
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);

		skb_reserve(skb, pad);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

			iph = (struct iphdr *)skb->data;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
				netif_receive_skb(skb);
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
	struct tstorm_eth_rx_producers rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */
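	/* Ordering sketch (illustration only): on a weakly-ordered arch,
	 * without the barrier described above the producer write could
	 * become visible to the chip before the BD/SGE writes it
	 * advertises, letting the FW fetch a stale descriptor; mmiowb()
	 * likewise keeps the producer MMIO writes ordered between CPUs.
	 */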
	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
	/* the CQ "next element" is the same size as a regular element,
	   which is why this is OK here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);

		/* this is an rx packet */
			rx_buf = &fp->rx_buf_ring[bd_cons];

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",

					bnx2x_tpa_start(fp, queue, skb,

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",

					if (!BNX2X_RX_SUM_FIX(cqe))
1470 BNX2X_ERR("STOP on none TCP "
1473 /* This is a size of the linear data
1475 len = le16_to_cpu(cqe->fast_path_cqe.
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;

				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);

				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;

				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;

					bp->eth_stats.hw_csum_err++;

			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				    le16_to_cpu(cqe->fast_path_cqe.vlan_tag));

			netif_receive_skb(skb);

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);

		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,

	fp->rx_pkt += rx_pkt;
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

 * General service functions

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);

		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)

	DP(NETIF_MSG_HW, "Timeout\n");
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);

		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);

	REG_WR(bp, hw_lock_control_reg, resource_bit);
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

static void bnx2x_release_phy_lock(struct bnx2x *bp)
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
1803 /* The GPIO should be swapped if swap register is set and active */
1804 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1805 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1806 int gpio_shift = gpio_num +
1807 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1808 u32 gpio_mask = (1 << gpio_shift);
1811 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1812 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask out all bits except the float ones */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
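/* Usage sketch (illustration only): drive GPIO 1 low on the current port
 * with
 *
 *	bnx2x_set_gpio(bp, 1, MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 *
 * The read-modify-write above runs under HW_LOCK_RESOURCE_GPIO because
 * both ports share the single MISC_REG_GPIO register.
 */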
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
	u32 spio_mask = (1 << spio_num);

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask out all bits except the float ones */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;

		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |

static void bnx2x_link_report(struct bnx2x *bp)
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
				printk(", transmit ");
			printk("flow control ON");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
	if (!BP_NOMCP(bp)) {

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);
1980 BNX2X_ERR("Bootcode is missing -not initializing link\n");
static void bnx2x_link_set(struct bnx2x *bp)
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
		BNX2X_ERR("Bootcode is missing - not setting link\n");
static void bnx2x__link_reset(struct bnx2x *bp)
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
static u8 bnx2x_link_test(struct bnx2x *bp)
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
	int i, port = BP_PORT(bp);

	for (i = 0; i < E1HVN_MAX; i++) {
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
				vn_min_rate = DEF_MIN_RATE;

			wsum += vn_min_rate;
2057 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2060 struct cmng_struct_per_port *m_cmng_port)
2062 u32 r_param = port_rate / 8;
2063 int port = BP_PORT(bp);
2066 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2068 /* Enable minmax only if we are in e1hmf mode */
2070 u32 fair_periodic_timeout_usec;
2073 /* Enable rate shaping and fairness */
2074 m_cmng_port->flags.cmng_vn_enable = 1;
2075 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2076 m_cmng_port->flags.rate_shaping_enable = 1;
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
			   " fairness will be disabled\n");
		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   the 1.25 coefficient (i.e. the *5/4 below) makes the
		   threshold a little bigger than the real time, to
		   compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
					fair_periodic_timeout_usec / 4;
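		/* Worked example (illustrative, not from the original
		 * source): at port_rate = 10000 Mbps, r_param = 10000/8 =
		 * 1250 bytes/usec, so the fairness timer resolution is
		 * QM_ARB_BYTES/1250 usec and t_fair = T_FAIR_COEF/10000
		 * usec (the "1000usec at 10G" quoted above); the final
		 * division by 4 converts usec into 4-usec SDM ticks.
		 */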
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		   "Single function mode minmax will be disabled\n");

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {

		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
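	/* Unit check (illustrative, not from the original source):
	 * vn_max_rate is in Mbps, i.e. bits per usec, so multiplying by
	 * RS_PERIODIC_TIMEOUT_USEC yields bits per period and the division
	 * by 8 converts to bytes; e.g. 10000 Mbps over a 100 usec period
	 * gives 10000 * 100 / 8 = 125000 bytes of quota.
	 */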
#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      protocol_counters[protocol].rate/8));
	/* credit for each period of the fairness algorithm:
	   number of bytes in T_FAIR (the vn's share of the port rate).
	   wsum should not be larger than 10000, thus
	   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
	m_fair_vn.vn_credit_delta =
		max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
		    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
	DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
	   m_fair_vn.vn_credit_delta);
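	/* Worked example (illustrative, not from the original source): with
	 * three active VNs at min rates 100/100/100, wsum = 300, so a VN
	 * with vn_min_rate = 100 earns 100 * T_FAIR_COEF / (8 * 300) bytes
	 * of credit per fairness period, clamped from below by twice
	 * fair_threshold.
	 */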
#ifdef BNX2X_PER_PROT_QOS

		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
				protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share of the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
2228 /* This function is called upon link interrupt */
2229 static void bnx2x_link_attn(struct bnx2x *bp)
2230 {
2233 /* Make sure that we are synced with the current statistics */
2234 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2236 bnx2x_acquire_phy_lock(bp);
2237 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2238 bnx2x_release_phy_lock(bp);
2240 if (bp->link_vars.link_up) {
2242 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2243 struct host_port_stats *pstats;
2245 pstats = bnx2x_sp(bp, port_stats);
2246 /* reset old bmac stats */
2247 memset(&(pstats->mac_stx[0]), 0,
2248 sizeof(struct mac_stx));
2249 }
2250 if ((bp->state == BNX2X_STATE_OPEN) ||
2251 (bp->state == BNX2X_STATE_DISABLED))
2252 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2253 }
2255 /* indicate link status */
2256 bnx2x_link_report(bp);
2258 if (IS_E1HMF(bp)) {
2259 int func;
2260 int vn;
2261 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2262 if (vn == BP_E1HVN(bp))
2263 continue;
2265 func = ((vn << 1) | BP_PORT(bp));
2267 /* Set the attention towards other drivers
2268 and enable it */
2269 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2270 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2271 }
2272 }
2274 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2275 struct cmng_struct_per_port m_cmng_port;
2276 u32 wsum;
2277 int port = BP_PORT(bp);
2278 int vn;
2279 /* Init RATE SHAPING and FAIRNESS contexts */
2280 wsum = bnx2x_calc_vn_wsum(bp);
2281 bnx2x_init_port_minmax(bp, (int)wsum,
2282 bp->link_vars.line_speed,
2283 &m_cmng_port);
2285 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2286 bnx2x_init_vn_minmax(bp, 2*vn + port,
2287 wsum, bp->link_vars.line_speed,
2288 &m_cmng_port);
2289 }
2290 }
2292 static void bnx2x__link_status_update(struct bnx2x *bp)
2293 {
2294 if (bp->state != BNX2X_STATE_OPEN)
2295 return;
2297 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2299 if (bp->link_vars.link_up)
2300 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301 else
2302 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2304 /* indicate link status */
2305 bnx2x_link_report(bp);
2306 }
2308 static void bnx2x_pmf_update(struct bnx2x *bp)
2309 {
2310 int port = BP_PORT(bp);
2311 u32 val;
2313 bp->port.pmf = 1;
2314 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2316 /* enable nig attention */
2317 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2318 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2319 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2321 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2322 }
2329 * General service functions
2332 /* the slow path queue is odd since completions arrive on the fastpath ring */
2333 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2334 u32 data_hi, u32 data_lo, int common)
2335 {
2336 int func = BP_FUNC(bp);
2338 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2339 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2340 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2341 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2342 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2344 #ifdef BNX2X_STOP_ON_ERROR
2345 if (unlikely(bp->panic))
2346 return -EIO;
2347 #endif
2349 spin_lock_bh(&bp->spq_lock);
2351 if (!bp->spq_left) {
2352 BNX2X_ERR("BUG! SPQ ring full!\n");
2353 spin_unlock_bh(&bp->spq_lock);
2354 bnx2x_panic();
2355 return -EBUSY;
2356 }
2358 /* CID needs the port number to be encoded in it */
2359 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2360 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2361 HW_CID(bp, cid)));
2362 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2363 if (common)
2364 bp->spq_prod_bd->hdr.type |=
2365 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2367 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2368 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2370 bp->spq_left--;
2372 if (bp->spq_prod_bd == bp->spq_last_bd) {
2373 bp->spq_prod_bd = bp->spq;
2374 bp->spq_prod_idx = 0;
2375 DP(NETIF_MSG_TIMER, "end of spq\n");
2377 } else {
2378 bp->spq_prod_bd++;
2379 bp->spq_prod_idx++;
2380 }
2382 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2383 bp->spq_prod_idx);
2385 spin_unlock_bh(&bp->spq_lock);
2386 return 0;
2387 }
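/* [Editor's illustration -- not part of the original driver] The
 * producer-wrap idiom used by bnx2x_sp_post() above, reduced to a
 * self-contained form; the struct and names are hypothetical. */
#if 0
struct elem { u32 raw[2]; };

struct ring {
	struct elem *base, *last, *prod;
	u16 prod_idx;
};

static void ring_advance_producer(struct ring *r)
{
	if (r->prod == r->last) {	/* last element -> wrap to base */
		r->prod = r->base;
		r->prod_idx = 0;
	} else {
		r->prod++;
		r->prod_idx++;
	}
}
#endif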
2389 /* acquire split MCP access lock register */
2390 static int bnx2x_acquire_alr(struct bnx2x *bp)
2391 {
2392 u32 i, j, val;
2393 int rc = 0;
2395 might_sleep();
2396 i = 100;
2397 for (j = 0; j < i*10; j++) {
2398 val = (1UL << 31);
2399 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2400 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2401 if (val & (1L << 31))
2402 break;
2404 msleep(5);
2405 }
2406 if (!(val & (1L << 31))) {
2407 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2408 rc = -EBUSY;
2409 }
2411 return rc;
2412 }
2414 /* release split MCP access lock register */
2415 static void bnx2x_release_alr(struct bnx2x *bp)
2416 {
2417 u32 val = 0;
2419 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420 }
2422 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2423 {
2424 struct host_def_status_block *def_sb = bp->def_status_blk;
2425 u16 rc = 0;
2427 barrier(); /* status block is written to by the chip */
2428 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2429 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2430 rc |= 1;
2431 }
2432 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2433 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2434 rc |= 2;
2435 }
2436 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2437 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2438 rc |= 4;
2439 }
2440 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2441 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2442 rc |= 8;
2443 }
2444 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2445 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2446 rc |= 16;
2447 }
2448 return rc;
2449 }
2452 * slow path service functions
2455 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2456 {
2457 int port = BP_PORT(bp);
2458 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2459 COMMAND_REG_ATTN_BITS_SET);
2460 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2461 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2462 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2463 NIG_REG_MASK_INTERRUPT_PORT0;
2464 u32 aeu_mask;
2466 if (bp->attn_state & asserted)
2467 BNX2X_ERR("IGU ERROR\n");
2469 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2470 aeu_mask = REG_RD(bp, aeu_addr);
2472 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2473 aeu_mask, asserted);
2474 aeu_mask &= ~(asserted & 0xff);
2475 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2477 REG_WR(bp, aeu_addr, aeu_mask);
2478 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2480 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2481 bp->attn_state |= asserted;
2482 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2484 if (asserted & ATTN_HARD_WIRED_MASK) {
2485 if (asserted & ATTN_NIG_FOR_FUNC) {
2487 /* save nig interrupt mask */
2488 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2489 REG_WR(bp, nig_int_mask_addr, 0);
2491 bnx2x_link_attn(bp);
2493 /* handle unicore attn? */
2494 }
2495 if (asserted & ATTN_SW_TIMER_4_FUNC)
2496 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2498 if (asserted & GPIO_2_FUNC)
2499 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2501 if (asserted & GPIO_3_FUNC)
2502 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2504 if (asserted & GPIO_4_FUNC)
2505 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2508 if (asserted & ATTN_GENERAL_ATTN_1) {
2509 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2510 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2511 }
2512 if (asserted & ATTN_GENERAL_ATTN_2) {
2513 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2514 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2515 }
2516 if (asserted & ATTN_GENERAL_ATTN_3) {
2517 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2518 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2519 }
2521 if (asserted & ATTN_GENERAL_ATTN_4) {
2522 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2524 }
2525 if (asserted & ATTN_GENERAL_ATTN_5) {
2526 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2527 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2528 }
2529 if (asserted & ATTN_GENERAL_ATTN_6) {
2530 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2531 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2532 }
2535 } /* if hardwired */
2537 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2538 asserted, hc_addr);
2539 REG_WR(bp, hc_addr, asserted);
2541 /* now set back the mask */
2542 if (asserted & ATTN_NIG_FOR_FUNC)
2543 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2544 }
2546 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2547 {
2548 int port = BP_PORT(bp);
2549 int reg_offset;
2550 u32 val;
2552 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2553 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2555 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2557 val = REG_RD(bp, reg_offset);
2558 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2559 REG_WR(bp, reg_offset, val);
2561 BNX2X_ERR("SPIO5 hw attention\n");
2563 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2564 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2565 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2566 /* Fan failure attention */
2568 /* The PHY reset is controlled by GPIO 1 */
2569 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2570 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2571 /* Low power mode is controlled by GPIO 2 */
2572 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2573 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2574 /* mark the failure */
2575 bp->link_params.ext_phy_config &=
2576 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2577 bp->link_params.ext_phy_config |=
2578 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2579 SHMEM_WR(bp,
2580 dev_info.port_hw_config[port].
2581 external_phy_config,
2582 bp->link_params.ext_phy_config);
2583 /* log the failure */
2584 printk(KERN_ERR PFX "Fan Failure on Network"
2585 " Controller %s has caused the driver to"
2586 " shutdown the card to prevent permanent"
2587 " damage. Please contact Dell Support for"
2588 " assistance\n", bp->dev->name);
2596 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2598 val = REG_RD(bp, reg_offset);
2599 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2600 REG_WR(bp, reg_offset, val);
2602 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2603 (attn & HW_INTERRUT_ASSERT_SET_0));
2604 bnx2x_panic();
2605 }
2606 }
2608 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2609 {
2610 u32 val;
2612 if (attn & BNX2X_DOORQ_ASSERT) {
2614 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2615 BNX2X_ERR("DB hw attention 0x%x\n", val);
2616 /* DORQ discard attention */
2617 if (val & 0x2)
2618 BNX2X_ERR("FATAL error from DORQ\n");
2619 }
2621 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2623 int port = BP_PORT(bp);
2626 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2627 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2629 val = REG_RD(bp, reg_offset);
2630 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2631 REG_WR(bp, reg_offset, val);
2633 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2634 (attn & HW_INTERRUT_ASSERT_SET_1));
2635 bnx2x_panic();
2636 }
2637 }
2639 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2640 {
2641 u32 val;
2643 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2645 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2646 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2647 /* CFC error attention */
2648 if (val & 0x2)
2649 BNX2X_ERR("FATAL error from CFC\n");
2650 }
2652 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2654 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2655 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2656 /* RQ_USDMDP_FIFO_OVERFLOW */
2657 if (val & 0x18000)
2658 BNX2X_ERR("FATAL error from PXP\n");
2659 }
2661 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2663 int port = BP_PORT(bp);
2666 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2667 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2669 val = REG_RD(bp, reg_offset);
2670 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2671 REG_WR(bp, reg_offset, val);
2673 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2674 (attn & HW_INTERRUT_ASSERT_SET_2));
2679 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2680 {
2681 u32 val;
2683 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2685 if (attn & BNX2X_PMF_LINK_ASSERT) {
2686 int func = BP_FUNC(bp);
2688 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2689 bnx2x__link_status_update(bp);
2690 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2691 DRV_STATUS_PMF)
2692 bnx2x_pmf_update(bp);
2694 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2696 BNX2X_ERR("MC assert!\n");
2697 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2698 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2699 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2700 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2701 bnx2x_panic();
2703 } else if (attn & BNX2X_MCP_ASSERT) {
2705 BNX2X_ERR("MCP assert!\n");
2706 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2707 bnx2x_fw_dump(bp);
2709 } else
2710 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2711 }
2713 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2714 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2715 if (attn & BNX2X_GRC_TIMEOUT) {
2716 val = CHIP_IS_E1H(bp) ?
2717 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2718 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2720 if (attn & BNX2X_GRC_RSV) {
2721 val = CHIP_IS_E1H(bp) ?
2722 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2723 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2725 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2726 }
2727 }
2729 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2731 struct attn_route attn;
2732 struct attn_route group_mask;
2733 int port = BP_PORT(bp);
2734 int index;
2735 u32 reg_addr;
2736 u32 val;
2737 u32 aeu_mask;
2739 /* need to take HW lock because MCP or other port might also
2740 try to handle this event */
2741 bnx2x_acquire_alr(bp);
2743 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2744 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2745 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2746 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2747 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2748 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2750 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2751 if (deasserted & (1 << index)) {
2752 group_mask = bp->attn_group[index];
2754 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2755 index, group_mask.sig[0], group_mask.sig[1],
2756 group_mask.sig[2], group_mask.sig[3]);
2758 bnx2x_attn_int_deasserted3(bp,
2759 attn.sig[3] & group_mask.sig[3]);
2760 bnx2x_attn_int_deasserted1(bp,
2761 attn.sig[1] & group_mask.sig[1]);
2762 bnx2x_attn_int_deasserted2(bp,
2763 attn.sig[2] & group_mask.sig[2]);
2764 bnx2x_attn_int_deasserted0(bp,
2765 attn.sig[0] & group_mask.sig[0]);
2767 if ((attn.sig[0] & group_mask.sig[0] &
2768 HW_PRTY_ASSERT_SET_0) ||
2769 (attn.sig[1] & group_mask.sig[1] &
2770 HW_PRTY_ASSERT_SET_1) ||
2771 (attn.sig[2] & group_mask.sig[2] &
2772 HW_PRTY_ASSERT_SET_2))
2773 BNX2X_ERR("FATAL HW block parity attention\n");
2774 }
2775 }
2777 bnx2x_release_alr(bp);
2779 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2782 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2784 REG_WR(bp, reg_addr, val);
2786 if (~bp->attn_state & deasserted)
2787 BNX2X_ERR("IGU ERROR\n");
2789 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2790 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2792 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2793 aeu_mask = REG_RD(bp, reg_addr);
2795 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2796 aeu_mask, deasserted);
2797 aeu_mask |= (deasserted & 0xff);
2798 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2800 REG_WR(bp, reg_addr, aeu_mask);
2801 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2803 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2804 bp->attn_state &= ~deasserted;
2805 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2806 }
2808 static void bnx2x_attn_int(struct bnx2x *bp)
2809 {
2810 /* read local copy of bits */
2811 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2812 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2813 u32 attn_state = bp->attn_state;
2815 /* look for changed bits */
2816 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2817 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2820 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2821 attn_bits, attn_ack, asserted, deasserted);
2823 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2824 BNX2X_ERR("BAD attention state\n");
2826 /* handle bits that were raised */
2827 if (asserted)
2828 bnx2x_attn_int_asserted(bp, asserted);
2830 if (deasserted)
2831 bnx2x_attn_int_deasserted(bp, deasserted);
2832 }
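/* [Editor's note] One-bit truth table for the derivation above, where
 * asserted = bits & ~ack & ~state and deasserted = ~bits & ack & state:
 *
 *   bits ack state | asserted deasserted
 *    1    0    0   |    1         0      (new attention, not yet acked)
 *    0    1    1   |    0         1      (acked attention went away)
 *
 * every other combination yields 0/0 (nothing to handle); a bit on which
 * bits and ack agree while state disagrees trips the BAD attention state
 * error above. */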
2834 static void bnx2x_sp_task(struct work_struct *work)
2835 {
2836 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2837 u16 status;
2840 /* Return here if interrupt is disabled */
2841 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2842 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2843 return;
2844 }
2846 status = bnx2x_update_dsb_idx(bp);
2847 /* if (status == 0) */
2848 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2850 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2852 /* HW attentions */
2853 if (status & 0x1)
2854 bnx2x_attn_int(bp);
2856 /* CStorm events: query_stats, port delete ramrod */
2857 if (status & 0x2)
2858 bp->stats_pending = 0;
2860 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2861 IGU_INT_NOP, 1);
2862 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2863 IGU_INT_NOP, 1);
2864 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2865 IGU_INT_NOP, 1);
2866 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2867 IGU_INT_NOP, 1);
2868 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2869 IGU_INT_ENABLE, 1);
2870 }
2873 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2875 struct net_device *dev = dev_instance;
2876 struct bnx2x *bp = netdev_priv(dev);
2878 /* Return here if interrupt is disabled */
2879 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2880 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2881 return IRQ_HANDLED;
2882 }
2884 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2886 #ifdef BNX2X_STOP_ON_ERROR
2887 if (unlikely(bp->panic))
2888 return IRQ_HANDLED;
2889 #endif
2891 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2893 return IRQ_HANDLED;
2894 }
2896 /* end of slow path */
2900 /****************************************************************************
2901 * Macros
2902 ****************************************************************************/
2904 /* sum[hi:lo] += add[hi:lo] */
2905 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2906 do { \
2907 s_lo += a_lo; \
2908 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2909 } while (0)
2911 /* difference = minuend - subtrahend */
2912 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2913 do { \
2914 if (m_lo < s_lo) { \
2915 /* underflow */ \
2916 d_hi = m_hi - s_hi; \
2917 if (d_hi > 0) { \
2918 /* we can 'loan' 1 */ \
2919 d_hi--; \
2920 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2921 } else { \
2922 /* m_hi <= s_hi */ \
2923 d_hi = 0; \
2924 d_lo = 0; \
2925 } \
2926 } else { \
2927 /* m_lo >= s_lo */ \
2928 if (m_hi < s_hi) { \
2929 d_hi = 0; \
2930 d_lo = 0; \
2931 } else { \
2932 /* m_hi >= s_hi */ \
2933 d_hi = m_hi - s_hi; \
2934 d_lo = m_lo - s_lo; \
2935 } \
2936 } \
2937 } while (0)
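/* [Editor's sketch -- illustrative only, with hypothetical values] How the
 * two split-64 macros above compose when a hardware counter rolls over: */
#if 0
static void example_split64(void)
{
	u32 sum_hi = 0, sum_lo = 0xfffffff8;
	u32 d_hi, d_lo;

	/* new = 0x1:00000004, old = 0x0:fffffff8 -> diff = 0x0:0000000c */
	DIFF_64(d_hi, 1, 0, d_lo, 0x00000004, 0xfffffff8);

	/* 0xfffffff8 + 0xc wraps to 0x4, so ADD_64 carries into the high
	   word: the sum becomes 0x1:00000004 */
	ADD_64(sum_hi, d_hi, sum_lo, d_lo);
}
#endif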
2939 #define UPDATE_STAT64(s, t) \
2940 do { \
2941 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2942 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2943 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2944 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2945 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2946 pstats->mac_stx[1].t##_lo, diff.lo); \
2947 } while (0)
2949 #define UPDATE_STAT64_NIG(s, t) \
2950 do { \
2951 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2952 diff.lo, new->s##_lo, old->s##_lo); \
2953 ADD_64(estats->t##_hi, diff.hi, \
2954 estats->t##_lo, diff.lo); \
2955 } while (0)
2957 /* sum[hi:lo] += add */
2958 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2959 do { \
2960 s_lo += a; \
2961 s_hi += (s_lo < a) ? 1 : 0; \
2962 } while (0)
2964 #define UPDATE_EXTEND_STAT(s) \
2965 do { \
2966 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2967 pstats->mac_stx[1].s##_lo, \
2968 new->s); \
2969 } while (0)
2971 #define UPDATE_EXTEND_TSTAT(s, t) \
2972 do { \
2973 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2974 old_tclient->s = le32_to_cpu(tclient->s); \
2975 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2976 } while (0)
2978 #define UPDATE_EXTEND_XSTAT(s, t) \
2979 do { \
2980 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2981 old_xclient->s = le32_to_cpu(xclient->s); \
2982 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2983 } while (0)
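/* [Editor's note] The UPDATE_EXTEND_* macros above survive 32-bit counter
 * wrap through unsigned arithmetic: if the hardware count moved from
 * 0xfffffffe to 0x00000001, then (u32)(0x00000001 - 0xfffffffe) == 3 is
 * the true delta, which ADD_EXTEND_64 folds into the 64-bit accumulator. */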
2986 * General service functions
2989 static inline long bnx2x_hilo(u32 *hiref)
2990 {
2991 u32 lo = *(hiref + 1);
2992 #if (BITS_PER_LONG == 64)
2993 u32 hi = *hiref;
2995 return HILO_U64(hi, lo);
2996 #else
2997 return lo;
2998 #endif
2999 }
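/* [Editor's note] Assuming the usual definition elsewhere in this driver,
 * HILO_U64(hi, lo) combines the split counter as (((u64)hi << 32) + lo);
 * on 32-bit kernels only the low half is returned above, since the
 * counters feed long-sized net_device_stats fields. */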
3002 * Init service functions
3005 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3006 {
3007 if (!bp->stats_pending) {
3008 struct eth_query_ramrod_data ramrod_data = {0};
3009 int rc;
3011 ramrod_data.drv_counter = bp->stats_counter++;
3012 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3013 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3015 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3016 ((u32 *)&ramrod_data)[1],
3017 ((u32 *)&ramrod_data)[0], 0);
3018 if (rc == 0) {
3019 /* stats ramrod has its own slot on the spq */
3020 bp->spq_left++;
3021 bp->stats_pending = 1;
3022 }
3023 }
3024 }
3026 static void bnx2x_stats_init(struct bnx2x *bp)
3028 int port = BP_PORT(bp);
3030 bp->executer_idx = 0;
3031 bp->stats_counter = 0;
3033 /* port stats */
3034 if (!BP_NOMCP(bp))
3035 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3036 else
3037 bp->port.port_stx = 0;
3038 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3040 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3041 bp->port.old_nig_stats.brb_discard =
3042 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3043 bp->port.old_nig_stats.brb_truncate =
3044 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3045 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3046 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3047 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3048 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3050 /* function stats */
3051 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3052 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3053 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3054 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3056 bp->stats_state = STATS_STATE_DISABLED;
3057 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3058 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3061 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3063 struct dmae_command *dmae = &bp->stats_dmae;
3064 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3066 *stats_comp = DMAE_COMP_VAL;
3069 if (bp->executer_idx) {
3070 int loader_idx = PMF_DMAE_C(bp);
3072 memset(dmae, 0, sizeof(struct dmae_command));
3074 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3075 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3076 DMAE_CMD_DST_RESET |
3077 #ifdef __BIG_ENDIAN
3078 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3079 #else
3080 DMAE_CMD_ENDIANITY_DW_SWAP |
3081 #endif
3082 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3084 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3085 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3086 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3087 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3088 sizeof(struct dmae_command) *
3089 (loader_idx + 1)) >> 2;
3090 dmae->dst_addr_hi = 0;
3091 dmae->len = sizeof(struct dmae_command) >> 2;
3092 if (CHIP_IS_E1(bp))
3093 dmae->len--;
3094 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3095 dmae->comp_addr_hi = 0;
3096 dmae->comp_val = 1;
3098 *stats_comp = 0;
3099 bnx2x_post_dmae(bp, dmae, loader_idx);
3101 } else if (bp->func_stx) {
3102 *stats_comp = 0;
3103 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3104 }
3105 }
3107 static int bnx2x_stats_comp(struct bnx2x *bp)
3109 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3110 int cnt = 10;
3112 might_sleep();
3113 while (*stats_comp != DMAE_COMP_VAL) {
3114 if (!cnt) {
3115 BNX2X_ERR("timeout waiting for stats finished\n");
3116 break;
3117 }
3118 cnt--;
3119 msleep(1);
3120 }
3121 return 1;
3122 }
3125 * Statistics service functions
3128 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3130 struct dmae_command *dmae;
3131 u32 opcode;
3132 int loader_idx = PMF_DMAE_C(bp);
3133 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3136 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3137 BNX2X_ERR("BUG!\n");
3138 return;
3139 }
3141 bp->executer_idx = 0;
3143 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3145 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3146 #ifdef __BIG_ENDIAN
3147 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3148 #else
3149 DMAE_CMD_ENDIANITY_DW_SWAP |
3150 #endif
3151 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3152 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3154 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3156 dmae->src_addr_lo = bp->port.port_stx >> 2;
3157 dmae->src_addr_hi = 0;
3158 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3159 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3160 dmae->len = DMAE_LEN32_RD_MAX;
3161 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3162 dmae->comp_addr_hi = 0;
3163 dmae->comp_val = 1;
3165 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3166 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3167 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3168 dmae->src_addr_hi = 0;
3169 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3170 DMAE_LEN32_RD_MAX * 4);
3171 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3172 DMAE_LEN32_RD_MAX * 4);
3173 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3174 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3175 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3176 dmae->comp_val = DMAE_COMP_VAL;
3178 *stats_comp = 0;
3179 bnx2x_hw_stats_post(bp);
3180 bnx2x_stats_comp(bp);
3181 }
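/* [Editor's note] The port_stx read above is issued as two chained DMAE
 * commands because one transaction is capped at DMAE_LEN32_RD_MAX dwords:
 * the first command completes to GRC, triggering the next loader slot,
 * while the second completes to PCI memory so that bnx2x_stats_comp() can
 * poll stats_comp for DMAE_COMP_VAL. */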
3183 static void bnx2x_port_stats_init(struct bnx2x *bp)
3185 struct dmae_command *dmae;
3186 int port = BP_PORT(bp);
3187 int vn = BP_E1HVN(bp);
3188 u32 opcode;
3189 int loader_idx = PMF_DMAE_C(bp);
3190 u32 mac_addr;
3191 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3194 if (!bp->link_vars.link_up || !bp->port.pmf) {
3195 BNX2X_ERR("BUG!\n");
3196 return;
3197 }
3199 bp->executer_idx = 0;
3202 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3203 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3204 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3205 #ifdef __BIG_ENDIAN
3206 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3207 #else
3208 DMAE_CMD_ENDIANITY_DW_SWAP |
3209 #endif
3210 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3211 (vn << DMAE_CMD_E1HVN_SHIFT));
3213 if (bp->port.port_stx) {
3215 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3216 dmae->opcode = opcode;
3217 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3218 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3219 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3220 dmae->dst_addr_hi = 0;
3221 dmae->len = sizeof(struct host_port_stats) >> 2;
3222 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3223 dmae->comp_addr_hi = 0;
3224 dmae->comp_val = 1;
3225 }
3227 if (bp->func_stx) {
3229 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3230 dmae->opcode = opcode;
3231 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3232 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3233 dmae->dst_addr_lo = bp->func_stx >> 2;
3234 dmae->dst_addr_hi = 0;
3235 dmae->len = sizeof(struct host_func_stats) >> 2;
3236 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3237 dmae->comp_addr_hi = 0;
3238 dmae->comp_val = 1;
3239 }
3242 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3243 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3244 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3245 #ifdef __BIG_ENDIAN
3246 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3247 #else
3248 DMAE_CMD_ENDIANITY_DW_SWAP |
3249 #endif
3250 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3251 (vn << DMAE_CMD_E1HVN_SHIFT));
3253 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3255 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3256 NIG_REG_INGRESS_BMAC0_MEM);
3258 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3259 BIGMAC_REGISTER_TX_STAT_GTBYT */
3260 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3261 dmae->opcode = opcode;
3262 dmae->src_addr_lo = (mac_addr +
3263 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3264 dmae->src_addr_hi = 0;
3265 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3266 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3267 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3268 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3269 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3270 dmae->comp_addr_hi = 0;
3271 dmae->comp_val = 1;
3273 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3274 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3275 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276 dmae->opcode = opcode;
3277 dmae->src_addr_lo = (mac_addr +
3278 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3279 dmae->src_addr_hi = 0;
3280 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3281 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3282 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3283 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3284 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3285 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3286 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3287 dmae->comp_addr_hi = 0;
3288 dmae->comp_val = 1;
3290 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3292 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3294 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3295 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296 dmae->opcode = opcode;
3297 dmae->src_addr_lo = (mac_addr +
3298 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3299 dmae->src_addr_hi = 0;
3300 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3301 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3302 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3303 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304 dmae->comp_addr_hi = 0;
3305 dmae->comp_val = 1;
3307 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3308 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3309 dmae->opcode = opcode;
3310 dmae->src_addr_lo = (mac_addr +
3311 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3312 dmae->src_addr_hi = 0;
3313 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3314 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3315 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3316 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3317 dmae->len = 1;
3318 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319 dmae->comp_addr_hi = 0;
3320 dmae->comp_val = 1;
3322 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = opcode;
3325 dmae->src_addr_lo = (mac_addr +
3326 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3327 dmae->src_addr_hi = 0;
3328 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3332 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3335 dmae->comp_val = 1;
3336 }
3338 /* NIG */
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3342 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3346 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3347 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3348 dmae->comp_addr_hi = 0;
3349 dmae->comp_val = 1;
3351 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3352 dmae->opcode = opcode;
3353 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3354 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3355 dmae->src_addr_hi = 0;
3356 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3357 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3358 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3359 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3360 dmae->len = (2*sizeof(u32)) >> 2;
3361 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362 dmae->comp_addr_hi = 0;
3363 dmae->comp_val = 1;
3365 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3367 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3368 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3369 #ifdef __BIG_ENDIAN
3370 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3371 #else
3372 DMAE_CMD_ENDIANITY_DW_SWAP |
3373 #endif
3374 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3375 (vn << DMAE_CMD_E1HVN_SHIFT));
3376 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3377 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3378 dmae->src_addr_hi = 0;
3379 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3380 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3381 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3382 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3383 dmae->len = (2*sizeof(u32)) >> 2;
3384 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3385 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3386 dmae->comp_val = DMAE_COMP_VAL;
3388 *stats_comp = 0;
3389 }
3391 static void bnx2x_func_stats_init(struct bnx2x *bp)
3393 struct dmae_command *dmae = &bp->stats_dmae;
3394 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3397 if (!bp->func_stx) {
3398 BNX2X_ERR("BUG!\n");
3399 return;
3400 }
3402 bp->executer_idx = 0;
3403 memset(dmae, 0, sizeof(struct dmae_command));
3405 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3406 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3407 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3408 #ifdef __BIG_ENDIAN
3409 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3410 #else
3411 DMAE_CMD_ENDIANITY_DW_SWAP |
3412 #endif
3413 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3414 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3415 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3416 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3417 dmae->dst_addr_lo = bp->func_stx >> 2;
3418 dmae->dst_addr_hi = 0;
3419 dmae->len = sizeof(struct host_func_stats) >> 2;
3420 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3421 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3422 dmae->comp_val = DMAE_COMP_VAL;
3424 *stats_comp = 0;
3425 }
3427 static void bnx2x_stats_start(struct bnx2x *bp)
3428 {
3429 if (bp->port.pmf)
3430 bnx2x_port_stats_init(bp);
3432 else if (bp->func_stx)
3433 bnx2x_func_stats_init(bp);
3435 bnx2x_hw_stats_post(bp);
3436 bnx2x_storm_stats_post(bp);
3439 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3441 bnx2x_stats_comp(bp);
3442 bnx2x_stats_pmf_update(bp);
3443 bnx2x_stats_start(bp);
3446 static void bnx2x_stats_restart(struct bnx2x *bp)
3448 bnx2x_stats_comp(bp);
3449 bnx2x_stats_start(bp);
3452 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3454 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3455 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3456 struct regpair diff;
3458 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3459 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3460 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3461 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3462 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3463 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3464 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3465 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3466 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3467 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3468 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3469 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3470 UPDATE_STAT64(tx_stat_gt127,
3471 tx_stat_etherstatspkts65octetsto127octets);
3472 UPDATE_STAT64(tx_stat_gt255,
3473 tx_stat_etherstatspkts128octetsto255octets);
3474 UPDATE_STAT64(tx_stat_gt511,
3475 tx_stat_etherstatspkts256octetsto511octets);
3476 UPDATE_STAT64(tx_stat_gt1023,
3477 tx_stat_etherstatspkts512octetsto1023octets);
3478 UPDATE_STAT64(tx_stat_gt1518,
3479 tx_stat_etherstatspkts1024octetsto1522octets);
3480 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3481 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3482 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3483 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3484 UPDATE_STAT64(tx_stat_gterr,
3485 tx_stat_dot3statsinternalmactransmiterrors);
3486 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3489 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3491 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3492 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3494 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3495 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3496 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3497 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3498 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3499 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3500 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3501 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3502 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3503 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3504 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3505 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3506 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3507 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3508 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3509 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3510 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3516 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3517 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3518 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3519 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3523 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3524 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3527 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3529 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3530 struct nig_stats *old = &(bp->port.old_nig_stats);
3531 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3532 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3533 struct regpair diff;
3535 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3536 bnx2x_bmac_stats_update(bp);
3538 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3539 bnx2x_emac_stats_update(bp);
3541 else { /* unreached */
3542 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3543 return -1;
3544 }
3546 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3547 new->brb_discard - old->brb_discard);
3548 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3549 new->brb_truncate - old->brb_truncate);
3551 UPDATE_STAT64_NIG(egress_mac_pkt0,
3552 etherstatspkts1024octetsto1522octets);
3553 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3555 memcpy(old, new, sizeof(struct nig_stats));
3557 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3558 sizeof(struct mac_stx));
3559 estats->brb_drop_hi = pstats->brb_drop_hi;
3560 estats->brb_drop_lo = pstats->brb_drop_lo;
3562 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3564 return 0;
3565 }
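/* [Editor's note] start/end act as a consistency stamp on the snapshot
 * just written: incrementing host_port_stats_end and copying it into
 * host_port_stats_start in a single statement marks the block complete,
 * so a reader that observes start != end knows it raced a partial
 * update. */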
3567 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3569 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3570 int cl_id = BP_CL_ID(bp);
3571 struct tstorm_per_port_stats *tport =
3572 &stats->tstorm_common.port_statistics;
3573 struct tstorm_per_client_stats *tclient =
3574 &stats->tstorm_common.client_statistics[cl_id];
3575 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3576 struct xstorm_per_client_stats *xclient =
3577 &stats->xstorm_common.client_statistics[cl_id];
3578 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3579 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3580 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3583 /* are storm stats valid? */
3584 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3585 bp->stats_counter) {
3586 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3587 " tstorm counter (%d) != stats_counter (%d)\n",
3588 tclient->stats_counter, bp->stats_counter);
3589 return -1;
3590 }
3591 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3592 bp->stats_counter) {
3593 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3594 " xstorm counter (%d) != stats_counter (%d)\n",
3595 xclient->stats_counter, bp->stats_counter);
3596 return -1;
3597 }
3599 fstats->total_bytes_received_hi =
3600 fstats->valid_bytes_received_hi =
3601 le32_to_cpu(tclient->total_rcv_bytes.hi);
3602 fstats->total_bytes_received_lo =
3603 fstats->valid_bytes_received_lo =
3604 le32_to_cpu(tclient->total_rcv_bytes.lo);
3606 estats->error_bytes_received_hi =
3607 le32_to_cpu(tclient->rcv_error_bytes.hi);
3608 estats->error_bytes_received_lo =
3609 le32_to_cpu(tclient->rcv_error_bytes.lo);
3610 ADD_64(estats->error_bytes_received_hi,
3611 estats->rx_stat_ifhcinbadoctets_hi,
3612 estats->error_bytes_received_lo,
3613 estats->rx_stat_ifhcinbadoctets_lo);
3615 ADD_64(fstats->total_bytes_received_hi,
3616 estats->error_bytes_received_hi,
3617 fstats->total_bytes_received_lo,
3618 estats->error_bytes_received_lo);
3620 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3621 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3622 total_multicast_packets_received);
3623 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3624 total_broadcast_packets_received);
3626 fstats->total_bytes_transmitted_hi =
3627 le32_to_cpu(xclient->total_sent_bytes.hi);
3628 fstats->total_bytes_transmitted_lo =
3629 le32_to_cpu(xclient->total_sent_bytes.lo);
3631 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3632 total_unicast_packets_transmitted);
3633 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3634 total_multicast_packets_transmitted);
3635 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3636 total_broadcast_packets_transmitted);
3638 memcpy(estats, &(fstats->total_bytes_received_hi),
3639 sizeof(struct host_func_stats) - 2*sizeof(u32));
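/* [Editor's note] The 2*sizeof(u32) above skips the host_func_stats_start/
 * host_func_stats_end markers at the head of struct host_func_stats; the
 * copy assumes bnx2x_eth_stats mirrors the remaining layout starting at
 * total_bytes_received_hi. */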
3641 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3642 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3643 estats->brb_truncate_discard =
3644 le32_to_cpu(tport->brb_truncate_discard);
3645 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3647 old_tclient->rcv_unicast_bytes.hi =
3648 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3649 old_tclient->rcv_unicast_bytes.lo =
3650 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3651 old_tclient->rcv_broadcast_bytes.hi =
3652 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3653 old_tclient->rcv_broadcast_bytes.lo =
3654 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3655 old_tclient->rcv_multicast_bytes.hi =
3656 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3657 old_tclient->rcv_multicast_bytes.lo =
3658 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3659 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3661 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3662 old_tclient->packets_too_big_discard =
3663 le32_to_cpu(tclient->packets_too_big_discard);
3664 estats->no_buff_discard =
3665 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3666 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3668 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3669 old_xclient->unicast_bytes_sent.hi =
3670 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3671 old_xclient->unicast_bytes_sent.lo =
3672 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3673 old_xclient->multicast_bytes_sent.hi =
3674 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3675 old_xclient->multicast_bytes_sent.lo =
3676 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3677 old_xclient->broadcast_bytes_sent.hi =
3678 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3679 old_xclient->broadcast_bytes_sent.lo =
3680 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3682 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3684 return 0;
3685 }
3687 static void bnx2x_net_stats_update(struct bnx2x *bp)
3689 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3690 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3691 struct net_device_stats *nstats = &bp->dev->stats;
3693 nstats->rx_packets =
3694 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3695 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3696 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3698 nstats->tx_packets =
3699 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3700 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3701 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3703 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3705 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3707 nstats->rx_dropped = old_tclient->checksum_discard +
3708 estats->mac_discard;
3709 nstats->tx_dropped = 0;
3711 nstats->multicast =
3712 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3714 nstats->collisions =
3715 estats->tx_stat_dot3statssinglecollisionframes_lo +
3716 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3717 estats->tx_stat_dot3statslatecollisions_lo +
3718 estats->tx_stat_dot3statsexcessivecollisions_lo;
3720 estats->jabber_packets_received =
3721 old_tclient->packets_too_big_discard +
3722 estats->rx_stat_dot3statsframestoolong_lo;
3724 nstats->rx_length_errors =
3725 estats->rx_stat_etherstatsundersizepkts_lo +
3726 estats->jabber_packets_received;
3727 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3728 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3729 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3730 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3731 nstats->rx_missed_errors = estats->xxoverflow_discard;
3733 nstats->rx_errors = nstats->rx_length_errors +
3734 nstats->rx_over_errors +
3735 nstats->rx_crc_errors +
3736 nstats->rx_frame_errors +
3737 nstats->rx_fifo_errors +
3738 nstats->rx_missed_errors;
3740 nstats->tx_aborted_errors =
3741 estats->tx_stat_dot3statslatecollisions_lo +
3742 estats->tx_stat_dot3statsexcessivecollisions_lo;
3743 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3744 nstats->tx_fifo_errors = 0;
3745 nstats->tx_heartbeat_errors = 0;
3746 nstats->tx_window_errors = 0;
3748 nstats->tx_errors = nstats->tx_aborted_errors +
3749 nstats->tx_carrier_errors;
3752 static void bnx2x_stats_update(struct bnx2x *bp)
3753 {
3754 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3755 int update = 0;
3757 if (*stats_comp != DMAE_COMP_VAL)
3758 return;
3760 if (bp->port.pmf)
3761 update = (bnx2x_hw_stats_update(bp) == 0);
3763 update |= (bnx2x_storm_stats_update(bp) == 0);
3765 if (update)
3766 bnx2x_net_stats_update(bp);
3768 else {
3769 if (bp->stats_pending) {
3770 bp->stats_pending++;
3771 if (bp->stats_pending == 3) {
3772 BNX2X_ERR("stats not updated for 3 times\n");
3773 bnx2x_panic();
3774 return;
3775 }
3776 }
3777 }
3779 if (bp->msglevel & NETIF_MSG_TIMER) {
3780 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3781 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3782 struct net_device_stats *nstats = &bp->dev->stats;
3785 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3786 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3787 " tx pkt (%lx)\n",
3788 bnx2x_tx_avail(bp->fp),
3789 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3790 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3791 " rx pkt (%lx)\n",
3792 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3793 bp->fp->rx_comp_cons),
3794 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3795 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3796 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3797 estats->driver_xoff, estats->brb_drop_lo);
3798 printk(KERN_DEBUG "tstats: checksum_discard %u "
3799 "packets_too_big_discard %u no_buff_discard %u "
3800 "mac_discard %u mac_filter_discard %u "
3801 "xxovrflow_discard %u brb_truncate_discard %u "
3802 "ttl0_discard %u\n",
3803 old_tclient->checksum_discard,
3804 old_tclient->packets_too_big_discard,
3805 old_tclient->no_buff_discard, estats->mac_discard,
3806 estats->mac_filter_discard, estats->xxoverflow_discard,
3807 estats->brb_truncate_discard,
3808 old_tclient->ttl0_discard);
3810 for_each_queue(bp, i) {
3811 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3812 bnx2x_fp(bp, i, tx_pkt),
3813 bnx2x_fp(bp, i, rx_pkt),
3814 bnx2x_fp(bp, i, rx_calls));
3815 }
3816 }
3818 bnx2x_hw_stats_post(bp);
3819 bnx2x_storm_stats_post(bp);
3822 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3824 struct dmae_command *dmae;
3825 u32 opcode;
3826 int loader_idx = PMF_DMAE_C(bp);
3827 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3829 bp->executer_idx = 0;
3831 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3832 DMAE_CMD_C_ENABLE |
3833 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3834 #ifdef __BIG_ENDIAN
3835 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3836 #else
3837 DMAE_CMD_ENDIANITY_DW_SWAP |
3838 #endif
3839 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3840 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3842 if (bp->port.port_stx) {
3844 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3845 if (bp->func_stx)
3846 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3847 else
3848 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3849 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3850 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3851 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3852 dmae->dst_addr_hi = 0;
3853 dmae->len = sizeof(struct host_port_stats) >> 2;
3854 if (bp->func_stx) {
3855 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3856 dmae->comp_addr_hi = 0;
3857 dmae->comp_val = 1;
3858 } else {
3859 dmae->comp_addr_lo =
3860 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3861 dmae->comp_addr_hi =
3862 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3863 dmae->comp_val = DMAE_COMP_VAL;
3865 *stats_comp = 0;
3866 }
3867 }
3869 if (bp->func_stx) {
3871 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3872 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3873 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3874 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3875 dmae->dst_addr_lo = bp->func_stx >> 2;
3876 dmae->dst_addr_hi = 0;
3877 dmae->len = sizeof(struct host_func_stats) >> 2;
3878 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3879 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3880 dmae->comp_val = DMAE_COMP_VAL;
3882 *stats_comp = 0;
3883 }
3884 }
3886 static void bnx2x_stats_stop(struct bnx2x *bp)
3887 {
3888 int update = 0;
3890 bnx2x_stats_comp(bp);
3892 if (bp->port.pmf)
3893 update = (bnx2x_hw_stats_update(bp) == 0);
3895 update |= (bnx2x_storm_stats_update(bp) == 0);
3897 if (update) {
3898 bnx2x_net_stats_update(bp);
3900 if (bp->port.port_stx)
3901 bnx2x_port_stats_stop(bp);
3903 bnx2x_hw_stats_post(bp);
3904 bnx2x_stats_comp(bp);
3905 }
3906 }
3908 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3909 {
3910 }
3912 static const struct {
3913 void (*action)(struct bnx2x *bp);
3914 enum bnx2x_stats_state next_state;
3915 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3916 /* state event */
3917 {
3918 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3919 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3920 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3921 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3922 },
3923 {
3924 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3925 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3926 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3927 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3928 }
3929 };
3931 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3932 {
3933 enum bnx2x_stats_state state = bp->stats_state;
3935 bnx2x_stats_stm[state][event].action(bp);
3936 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3938 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3939 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3940 state, event, bp->stats_state);
3941 }
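/* [Editor's note] Example walk through the state machine above: starting
 * in STATS_STATE_DISABLED, a PMF event runs bnx2x_stats_pmf_update() and
 * stays DISABLED; a LINK_UP event then runs bnx2x_stats_start() and moves
 * to ENABLED, where periodic UPDATE events run bnx2x_stats_update() until
 * a STOP event runs bnx2x_stats_stop() and drops back to DISABLED. */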
3943 static void bnx2x_timer(unsigned long data)
3944 {
3945 struct bnx2x *bp = (struct bnx2x *) data;
3947 if (!netif_running(bp->dev))
3948 return;
3950 if (atomic_read(&bp->intr_sem) != 0)
3951 goto timer_restart;
3953 if (poll) {
3954 struct bnx2x_fastpath *fp = &bp->fp[0];
3955 int rc;
3957 bnx2x_tx_int(fp, 1000);
3958 rc = bnx2x_rx_int(fp, 1000);
3959 }
3961 if (!BP_NOMCP(bp)) {
3962 int func = BP_FUNC(bp);
3963 u32 drv_pulse;
3964 u32 mcp_pulse;
3966 ++bp->fw_drv_pulse_wr_seq;
3967 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3968 /* TBD - add SYSTEM_TIME */
3969 drv_pulse = bp->fw_drv_pulse_wr_seq;
3970 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3972 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3973 MCP_PULSE_SEQ_MASK);
3974 /* The delta between driver pulse and mcp response
3975 * should be 1 (before mcp response) or 0 (after mcp response)
3977 if ((drv_pulse != mcp_pulse) &&
3978 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3979 /* someone lost a heartbeat... */
3980 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3981 drv_pulse, mcp_pulse);
3982 }
3983 }
3985 if ((bp->state == BNX2X_STATE_OPEN) ||
3986 (bp->state == BNX2X_STATE_DISABLED))
3987 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3989 timer_restart:
3990 mod_timer(&bp->timer, jiffies + bp->current_interval);
3991 }
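/* [Editor's note] Heartbeat example with hypothetical numbers: the driver
 * writes drv_pulse = 0x23; a healthy MCP echoes mcp_pulse = 0x23 (already
 * responded) or 0x22 (one behind, response pending). Any other delta means
 * a lost heartbeat and triggers the BNX2X_ERR above. */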
3993 /* end of Statistics */
3998 * nic init service functions
4001 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4003 int port = BP_PORT(bp);
4005 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4006 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4007 sizeof(struct ustorm_status_block)/4);
4008 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4009 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4010 sizeof(struct cstorm_status_block)/4);
4013 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4014 dma_addr_t mapping, int sb_id)
4015 {
4016 int port = BP_PORT(bp);
4017 int func = BP_FUNC(bp);
4018 int index;
4019 u64 section;
4021 /* USTORM */
4022 section = ((u64)mapping) + offsetof(struct host_status_block,
4023 u_status_block);
4024 sb->u_status_block.status_block_id = sb_id;
4026 REG_WR(bp, BAR_USTRORM_INTMEM +
4027 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4028 REG_WR(bp, BAR_USTRORM_INTMEM +
4029 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4030 U64_HI(section));
4031 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4032 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4034 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4035 REG_WR16(bp, BAR_USTRORM_INTMEM +
4036 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4038 /* CSTORM */
4039 section = ((u64)mapping) + offsetof(struct host_status_block,
4040 c_status_block);
4041 sb->c_status_block.status_block_id = sb_id;
4043 REG_WR(bp, BAR_CSTRORM_INTMEM +
4044 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4045 REG_WR(bp, BAR_CSTRORM_INTMEM +
4046 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4047 U64_HI(section));
4048 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4049 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4051 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4052 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4053 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4055 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4058 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4060 int func = BP_FUNC(bp);
4062 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4063 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4064 sizeof(struct ustorm_def_status_block)/4);
4065 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4066 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4067 sizeof(struct cstorm_def_status_block)/4);
4068 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4069 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4070 sizeof(struct xstorm_def_status_block)/4);
4071 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4072 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4073 sizeof(struct tstorm_def_status_block)/4);
4076 static void bnx2x_init_def_sb(struct bnx2x *bp,
4077 struct host_def_status_block *def_sb,
4078 dma_addr_t mapping, int sb_id)
4079 {
4080 int port = BP_PORT(bp);
4081 int func = BP_FUNC(bp);
4082 int index, val, reg_offset;
4083 u64 section;
4086 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4087 atten_status_block);
4088 def_sb->atten_status_block.status_block_id = sb_id;
4090 bp->attn_state = 0;
4092 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4093 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4095 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4096 bp->attn_group[index].sig[0] = REG_RD(bp,
4097 reg_offset + 0x10*index);
4098 bp->attn_group[index].sig[1] = REG_RD(bp,
4099 reg_offset + 0x4 + 0x10*index);
4100 bp->attn_group[index].sig[2] = REG_RD(bp,
4101 reg_offset + 0x8 + 0x10*index);
4102 bp->attn_group[index].sig[3] = REG_RD(bp,
4103 reg_offset + 0xc + 0x10*index);
4106 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4107 HC_REG_ATTN_MSG0_ADDR_L);
4109 REG_WR(bp, reg_offset, U64_LO(section));
4110 REG_WR(bp, reg_offset + 4, U64_HI(section));
4112 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4114 val = REG_RD(bp, reg_offset);
4115 val |= sb_id;
4116 REG_WR(bp, reg_offset, val);
4119 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4120 u_def_status_block);
4121 def_sb->u_def_status_block.status_block_id = sb_id;
4123 REG_WR(bp, BAR_USTRORM_INTMEM +
4124 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4125 REG_WR(bp, BAR_USTRORM_INTMEM +
4126 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4127 U64_HI(section));
4128 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4129 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4131 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4132 REG_WR16(bp, BAR_USTRORM_INTMEM +
4133 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4136 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4137 c_def_status_block);
4138 def_sb->c_def_status_block.status_block_id = sb_id;
4140 REG_WR(bp, BAR_CSTRORM_INTMEM +
4141 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4142 REG_WR(bp, BAR_CSTRORM_INTMEM +
4143 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4144 U64_HI(section));
4145 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4146 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4148 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4149 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4150 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4153 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4154 t_def_status_block);
4155 def_sb->t_def_status_block.status_block_id = sb_id;
4157 REG_WR(bp, BAR_TSTRORM_INTMEM +
4158 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4159 REG_WR(bp, BAR_TSTRORM_INTMEM +
4160 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4161 U64_HI(section));
4162 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4163 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4165 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4166 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4167 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4170 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4171 x_def_status_block);
4172 def_sb->x_def_status_block.status_block_id = sb_id;
4174 REG_WR(bp, BAR_XSTRORM_INTMEM +
4175 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4176 REG_WR(bp, BAR_XSTRORM_INTMEM +
4177 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4178 U64_HI(section));
4179 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4180 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4182 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4183 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4184 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4186 bp->stats_pending = 0;
4187 bp->set_mac_pending = 0;
4189 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4192 static void bnx2x_update_coalesce(struct bnx2x *bp)
4194 int port = BP_PORT(bp);
4195 int i;
4197 for_each_queue(bp, i) {
4198 int sb_id = bp->fp[i].sb_id;
4200 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4201 REG_WR8(bp, BAR_USTRORM_INTMEM +
4202 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4203 U_SB_ETH_RX_CQ_INDEX),
4204 bp->rx_ticks/12);
4205 REG_WR16(bp, BAR_USTRORM_INTMEM +
4206 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4207 U_SB_ETH_RX_CQ_INDEX),
4208 bp->rx_ticks ? 0 : 1);
4209 REG_WR16(bp, BAR_USTRORM_INTMEM +
4210 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4211 U_SB_ETH_RX_BD_INDEX),
4212 bp->rx_ticks ? 0 : 1);
4214 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4215 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4216 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4217 C_SB_ETH_TX_CQ_INDEX),
4218 bp->tx_ticks/12);
4219 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4220 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4221 C_SB_ETH_TX_CQ_INDEX),
4222 bp->tx_ticks ? 0 : 1);
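/*
 * Note (added for clarity, not in the original source): writing 1 to the
 * HC_DISABLE word when rx_ticks/tx_ticks is 0 turns status-block
 * interrupt coalescing off for that index entirely, rather than
 * programming a zero-tick timeout.
 */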
4226 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4227 struct bnx2x_fastpath *fp, int last)
4229 int i;
4231 for (i = 0; i < last; i++) {
4232 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4233 struct sk_buff *skb = rx_buf->skb;
4235 if (skb == NULL) {
4236 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4237 continue;
4238 }
4240 if (fp->tpa_state[i] == BNX2X_TPA_START)
4241 pci_unmap_single(bp->pdev,
4242 pci_unmap_addr(rx_buf, mapping),
4243 bp->rx_buf_size,
4244 PCI_DMA_FROMDEVICE);
4246 dev_kfree_skb(skb);
4247 rx_buf->skb = NULL;
4251 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4253 int func = BP_FUNC(bp);
4254 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4255 ETH_MAX_AGGREGATION_QUEUES_E1H;
4256 u16 ring_prod, cqe_ring_prod;
4257 int i, j;
4259 bp->rx_buf_size = bp->dev->mtu;
4260 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4261 BCM_RX_ETH_PAYLOAD_ALIGN;
4263 if (bp->flags & TPA_ENABLE_FLAG) {
4264 DP(NETIF_MSG_IFUP,
4265 "rx_buf_size %d effective_mtu %d\n",
4266 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4268 for_each_queue(bp, j) {
4269 struct bnx2x_fastpath *fp = &bp->fp[j];
4271 for (i = 0; i < max_agg_queues; i++) {
4272 fp->tpa_pool[i].skb =
4273 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4274 if (!fp->tpa_pool[i].skb) {
4275 BNX2X_ERR("Failed to allocate TPA "
4276 "skb pool for queue[%d] - "
4277 "disabling TPA on this "
4279 bnx2x_free_tpa_pool(bp, fp, i);
4280 fp->disable_tpa = 1;
4283 pci_unmap_addr_set((struct sw_rx_bd *)
4284 &bp->fp->tpa_pool[i],
4285 mapping, 0);
4286 fp->tpa_state[i] = BNX2X_TPA_STOP;
4291 for_each_queue(bp, j) {
4292 struct bnx2x_fastpath *fp = &bp->fp[j];
4295 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4296 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4298 /* "next page" elements initialization */
4300 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4301 struct eth_rx_sge *sge;
4303 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4304 sge->addr_hi =
4305 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4306 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4307 sge->addr_lo =
4308 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4309 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4312 bnx2x_init_sge_ring_bit_mask(fp);
4315 for (i = 1; i <= NUM_RX_RINGS; i++) {
4316 struct eth_rx_bd *rx_bd;
4318 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4319 rx_bd->addr_hi =
4320 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4322 rx_bd->addr_lo =
4323 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4324 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4328 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4329 struct eth_rx_cqe_next_page *nextpg;
4331 nextpg = (struct eth_rx_cqe_next_page *)
4332 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4333 nextpg->addr_hi =
4334 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4335 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4336 nextpg->addr_lo =
4337 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4338 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
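/*
 * Note (added for clarity): the three loops above chain the multi-page
 * BD, SGE and RCQ rings. The "next page" element stored at the tail of
 * each page holds the DMA address of the following page, and the tail
 * of the last page wraps back to page 0, so the hardware sees one
 * circular ring it can walk without driver intervention.
 */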
4341 /* Allocate SGEs and initialize the ring elements */
4342 for (i = 0, ring_prod = 0;
4343 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4345 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4346 BNX2X_ERR("was only able to allocate "
4348 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4349 /* Cleanup already allocated elements */
4350 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4351 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4352 fp->disable_tpa = 1;
4353 ring_prod = 0;
4354 break;
4356 ring_prod = NEXT_SGE_IDX(ring_prod);
4358 fp->rx_sge_prod = ring_prod;
4360 /* Allocate BDs and initialize BD ring */
4361 fp->rx_comp_cons = 0;
4362 cqe_ring_prod = ring_prod = 0;
4363 for (i = 0; i < bp->rx_ring_size; i++) {
4364 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4365 BNX2X_ERR("was only able to allocate "
4367 bp->eth_stats.rx_skb_alloc_failed++;
4370 ring_prod = NEXT_RX_IDX(ring_prod);
4371 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4372 WARN_ON(ring_prod <= i);
4375 fp->rx_bd_prod = ring_prod;
4376 /* must not have more available CQEs than BDs */
4377 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4378 cqe_ring_prod);
4379 fp->rx_pkt = fp->rx_calls = 0;
4381 /* Warning!
4382 * this will generate an interrupt (to the TSTORM)
4383 * must only be done after chip is initialized
4384 */
4385 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4386 fp->rx_sge_prod);
4390 REG_WR(bp, BAR_USTRORM_INTMEM +
4391 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4392 U64_LO(fp->rx_comp_mapping));
4393 REG_WR(bp, BAR_USTRORM_INTMEM +
4394 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4395 U64_HI(fp->rx_comp_mapping));
4399 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4401 int i, j;
4403 for_each_queue(bp, j) {
4404 struct bnx2x_fastpath *fp = &bp->fp[j];
4406 for (i = 1; i <= NUM_TX_RINGS; i++) {
4407 struct eth_tx_bd *tx_bd =
4408 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4410 tx_bd->addr_hi =
4411 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4412 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4413 tx_bd->addr_lo =
4414 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4415 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4418 fp->tx_pkt_prod = 0;
4419 fp->tx_pkt_cons = 0;
4420 fp->tx_bd_prod = 0;
4421 fp->tx_bd_cons = 0;
4422 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4427 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4429 int func = BP_FUNC(bp);
4431 spin_lock_init(&bp->spq_lock);
4433 bp->spq_left = MAX_SPQ_PENDING;
4434 bp->spq_prod_idx = 0;
4435 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4436 bp->spq_prod_bd = bp->spq;
4437 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4439 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4440 U64_LO(bp->spq_mapping));
4441 REG_WR(bp,
4442 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4443 U64_HI(bp->spq_mapping));
4445 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4446 bp->spq_prod_idx);
4449 static void bnx2x_init_context(struct bnx2x *bp)
4451 int i;
4453 for_each_queue(bp, i) {
4454 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4455 struct bnx2x_fastpath *fp = &bp->fp[i];
4456 u8 sb_id = FP_SB_ID(fp);
4458 context->xstorm_st_context.tx_bd_page_base_hi =
4459 U64_HI(fp->tx_desc_mapping);
4460 context->xstorm_st_context.tx_bd_page_base_lo =
4461 U64_LO(fp->tx_desc_mapping);
4462 context->xstorm_st_context.db_data_addr_hi =
4463 U64_HI(fp->tx_prods_mapping);
4464 context->xstorm_st_context.db_data_addr_lo =
4465 U64_LO(fp->tx_prods_mapping);
4466 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4467 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4469 context->ustorm_st_context.common.sb_index_numbers =
4470 BNX2X_RX_SB_INDEX_NUM;
4471 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4472 context->ustorm_st_context.common.status_block_id = sb_id;
4473 context->ustorm_st_context.common.flags =
4474 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4475 context->ustorm_st_context.common.mc_alignment_size =
4476 BCM_RX_ETH_PAYLOAD_ALIGN;
4477 context->ustorm_st_context.common.bd_buff_size =
4478 bp->rx_buf_size;
4479 context->ustorm_st_context.common.bd_page_base_hi =
4480 U64_HI(fp->rx_desc_mapping);
4481 context->ustorm_st_context.common.bd_page_base_lo =
4482 U64_LO(fp->rx_desc_mapping);
4483 if (!fp->disable_tpa) {
4484 context->ustorm_st_context.common.flags |=
4485 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4486 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4487 context->ustorm_st_context.common.sge_buff_size =
4488 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4489 context->ustorm_st_context.common.sge_page_base_hi =
4490 U64_HI(fp->rx_sge_mapping);
4491 context->ustorm_st_context.common.sge_page_base_lo =
4492 U64_LO(fp->rx_sge_mapping);
4495 context->cstorm_st_context.sb_index_number =
4496 C_SB_ETH_TX_CQ_INDEX;
4497 context->cstorm_st_context.status_block_id = sb_id;
4499 context->xstorm_ag_context.cdu_reserved =
4500 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4501 CDU_REGION_NUMBER_XCM_AG,
4502 ETH_CONNECTION_TYPE);
4503 context->ustorm_ag_context.cdu_usage =
4504 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4505 CDU_REGION_NUMBER_UCM_AG,
4506 ETH_CONNECTION_TYPE);
4510 static void bnx2x_init_ind_table(struct bnx2x *bp)
4512 int port = BP_PORT(bp);
4518 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4519 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4520 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4521 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4522 i % bp->num_queues);
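/*
 * Illustrative example (added): with num_queues == 4 the table is
 * filled 0,1,2,3,0,1,2,3,... so RSS indirection entries are spread
 * round-robin across the RX queues.
 */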
4524 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4527 static void bnx2x_set_client_config(struct bnx2x *bp)
4529 struct tstorm_eth_client_config tstorm_client = {0};
4530 int port = BP_PORT(bp);
4531 int i;
4533 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4534 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4535 tstorm_client.config_flags =
4536 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4538 if (bp->rx_mode && bp->vlgrp) {
4539 tstorm_client.config_flags |=
4540 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4541 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4545 if (bp->flags & TPA_ENABLE_FLAG) {
4546 tstorm_client.max_sges_for_packet =
4547 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4548 tstorm_client.max_sges_for_packet =
4549 ((tstorm_client.max_sges_for_packet +
4550 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4551 PAGES_PER_SGE_SHIFT;
4553 tstorm_client.config_flags |=
4554 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
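/*
 * Worked example (illustrative; assumes SGE_PAGE_SIZE = 4096 and
 * PAGES_PER_SGE = 2): for mtu = 9000, SGE_PAGE_ALIGN(9000) >>
 * SGE_PAGE_SHIFT = 3 pages; rounding 3 up to a multiple of
 * PAGES_PER_SGE gives 4, and 4 >> PAGES_PER_SGE_SHIFT = 2 SGEs per
 * aggregated packet.
 */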
4557 for_each_queue(bp, i) {
4558 REG_WR(bp, BAR_TSTRORM_INTMEM +
4559 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4560 ((u32 *)&tstorm_client)[0]);
4561 REG_WR(bp, BAR_TSTRORM_INTMEM +
4562 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4563 ((u32 *)&tstorm_client)[1]);
4566 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4567 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4570 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4572 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4573 int mode = bp->rx_mode;
4574 int mask = (1 << BP_L_ID(bp));
4575 int func = BP_FUNC(bp);
4576 int i;
4578 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4580 switch (mode) {
4581 case BNX2X_RX_MODE_NONE: /* no Rx */
4582 tstorm_mac_filter.ucast_drop_all = mask;
4583 tstorm_mac_filter.mcast_drop_all = mask;
4584 tstorm_mac_filter.bcast_drop_all = mask;
4585 break;
4586 case BNX2X_RX_MODE_NORMAL:
4587 tstorm_mac_filter.bcast_accept_all = mask;
4588 break;
4589 case BNX2X_RX_MODE_ALLMULTI:
4590 tstorm_mac_filter.mcast_accept_all = mask;
4591 tstorm_mac_filter.bcast_accept_all = mask;
4592 break;
4593 case BNX2X_RX_MODE_PROMISC:
4594 tstorm_mac_filter.ucast_accept_all = mask;
4595 tstorm_mac_filter.mcast_accept_all = mask;
4596 tstorm_mac_filter.bcast_accept_all = mask;
4597 break;
4598 default:
4599 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4600 break;
4601 }
4603 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4604 REG_WR(bp, BAR_TSTRORM_INTMEM +
4605 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4606 ((u32 *)&tstorm_mac_filter)[i]);
4608 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4609 ((u32 *)&tstorm_mac_filter)[i]); */
4612 if (mode != BNX2X_RX_MODE_NONE)
4613 bnx2x_set_client_config(bp);
4616 static void bnx2x_init_internal_common(struct bnx2x *bp)
4618 int i;
4620 if (bp->flags & TPA_ENABLE_FLAG) {
4621 struct tstorm_eth_tpa_exist tpa = {0};
4623 tpa.tpa_exist = 1;
4625 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4626 ((u32 *)&tpa)[0]);
4627 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4628 ((u32 *)&tpa)[1]);
4631 /* Zero this manually as its initialization is
4632 currently missing in the initTool */
4633 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4634 REG_WR(bp, BAR_USTRORM_INTMEM +
4635 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4638 static void bnx2x_init_internal_port(struct bnx2x *bp)
4640 int port = BP_PORT(bp);
4642 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4643 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4644 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4645 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4648 static void bnx2x_init_internal_func(struct bnx2x *bp)
4650 struct tstorm_eth_function_common_config tstorm_config = {0};
4651 struct stats_indication_flags stats_flags = {0};
4652 int port = BP_PORT(bp);
4653 int func = BP_FUNC(bp);
4654 int i;
4655 u16 max_agg_size;
4657 if (is_multi(bp)) {
4658 tstorm_config.config_flags = MULTI_FLAGS;
4659 tstorm_config.rss_result_mask = MULTI_MASK;
4660 }
4662 tstorm_config.leading_client_id = BP_L_ID(bp);
4664 REG_WR(bp, BAR_TSTRORM_INTMEM +
4665 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4666 (*(u32 *)&tstorm_config));
4668 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4669 bnx2x_set_storm_rx_mode(bp);
4671 /* reset xstorm per client statistics */
4672 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4673 REG_WR(bp, BAR_XSTRORM_INTMEM +
4674 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4675 i*4, 0);
4677 /* reset tstorm per client statistics */
4678 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4679 REG_WR(bp, BAR_TSTRORM_INTMEM +
4680 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4681 i*4, 0);
4684 /* Init statistics related context */
4685 stats_flags.collect_eth = 1;
4687 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4688 ((u32 *)&stats_flags)[0]);
4689 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4690 ((u32 *)&stats_flags)[1]);
4692 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4693 ((u32 *)&stats_flags)[0]);
4694 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4695 ((u32 *)&stats_flags)[1]);
4697 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4698 ((u32 *)&stats_flags)[0]);
4699 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4700 ((u32 *)&stats_flags)[1]);
4702 REG_WR(bp, BAR_XSTRORM_INTMEM +
4703 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4704 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4705 REG_WR(bp, BAR_XSTRORM_INTMEM +
4706 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4707 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4709 REG_WR(bp, BAR_TSTRORM_INTMEM +
4710 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4711 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4712 REG_WR(bp, BAR_TSTRORM_INTMEM +
4713 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4714 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4716 if (CHIP_IS_E1H(bp)) {
4717 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4718 IS_E1HMF(bp));
4719 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4720 IS_E1HMF(bp));
4721 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4722 IS_E1HMF(bp));
4723 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4724 IS_E1HMF(bp));
4726 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4727 bp->e1hov);
4728 }
4730 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4731 max_agg_size =
4732 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4733 SGE_PAGE_SIZE * PAGES_PER_SGE),
4734 (u32)0xffff);
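/*
 * Illustrative arithmetic (added; assumes SGE_PAGE_SIZE = 4096 and
 * PAGES_PER_SGE = 2): min(8, MAX_SKB_FRAGS) * 4096 * 2 = 64K, which
 * the outer min() clamps to 0xffff so the value fits the 16-bit
 * USTORM max-aggregation-size field written below.
 */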
4735 for_each_queue(bp, i) {
4736 struct bnx2x_fastpath *fp = &bp->fp[i];
4738 REG_WR(bp, BAR_USTRORM_INTMEM +
4739 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4740 U64_LO(fp->rx_comp_mapping));
4741 REG_WR(bp, BAR_USTRORM_INTMEM +
4742 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4743 U64_HI(fp->rx_comp_mapping));
4745 REG_WR16(bp, BAR_USTRORM_INTMEM +
4746 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4747 max_agg_size);
4751 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4753 switch (load_code) {
4754 case FW_MSG_CODE_DRV_LOAD_COMMON:
4755 bnx2x_init_internal_common(bp);
4756 /* no break */
4758 case FW_MSG_CODE_DRV_LOAD_PORT:
4759 bnx2x_init_internal_port(bp);
4760 /* no break */
4762 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4763 bnx2x_init_internal_func(bp);
4764 break;
4766 default:
4767 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4768 break;
4769 }
4772 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4774 int i;
4776 for_each_queue(bp, i) {
4777 struct bnx2x_fastpath *fp = &bp->fp[i];
4780 fp->state = BNX2X_FP_STATE_CLOSED;
4782 fp->cl_id = BP_L_ID(bp) + i;
4783 fp->sb_id = fp->cl_id;
4785 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4786 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4787 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4788 FP_SB_ID(fp));
4789 bnx2x_update_fpsb_idx(fp);
4792 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4793 DEF_SB_ID);
4794 bnx2x_update_dsb_idx(bp);
4795 bnx2x_update_coalesce(bp);
4796 bnx2x_init_rx_rings(bp);
4797 bnx2x_init_tx_ring(bp);
4798 bnx2x_init_sp_ring(bp);
4799 bnx2x_init_context(bp);
4800 bnx2x_init_internal(bp, load_code);
4801 bnx2x_init_ind_table(bp);
4802 bnx2x_int_enable(bp);
4805 /* end of nic init */
4808 * gzip service functions
4811 static int bnx2x_gunzip_init(struct bnx2x *bp)
4813 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4814 &bp->gunzip_mapping);
4815 if (bp->gunzip_buf == NULL)
4816 goto gunzip_nomem1;
4818 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4819 if (bp->strm == NULL)
4820 goto gunzip_nomem2;
4822 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4823 GFP_KERNEL);
4824 if (bp->strm->workspace == NULL)
4825 goto gunzip_nomem3;
4827 return 0;
4829 gunzip_nomem3:
4830 kfree(bp->strm);
4831 bp->strm = NULL;
4833 gunzip_nomem2:
4834 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4835 bp->gunzip_mapping);
4836 bp->gunzip_buf = NULL;
4838 gunzip_nomem1:
4839 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4840 " decompression\n", bp->dev->name);
4841 return -ENOMEM;
4844 static void bnx2x_gunzip_end(struct bnx2x *bp)
4846 kfree(bp->strm->workspace);
4848 kfree(bp->strm);
4849 bp->strm = NULL;
4851 if (bp->gunzip_buf) {
4852 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4853 bp->gunzip_mapping);
4854 bp->gunzip_buf = NULL;
4858 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4860 int n, rc;
4862 /* check gzip header */
4863 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4864 return -EINVAL;
4866 n = 10;
4868 #define FNAME 0x8
4870 if (zbuf[3] & FNAME)
4871 while ((zbuf[n++] != 0) && (n < len));
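/*
 * Illustrative example (added, not from the original source): a header
 * of 1f 8b 08 08 <mtime:4> <xfl> <os> "fw\0" has FNAME set in the
 * flags byte (zbuf[3]), so n skips the 10 fixed header bytes plus the
 * NUL-terminated name before the raw deflate stream is handed to zlib
 * below.
 */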
4873 bp->strm->next_in = zbuf + n;
4874 bp->strm->avail_in = len - n;
4875 bp->strm->next_out = bp->gunzip_buf;
4876 bp->strm->avail_out = FW_BUF_SIZE;
4878 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4879 if (rc != Z_OK)
4880 return rc;
4882 rc = zlib_inflate(bp->strm, Z_FINISH);
4883 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4884 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4885 bp->dev->name, bp->strm->msg);
4887 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4888 if (bp->gunzip_outlen & 0x3)
4889 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4890 " gunzip_outlen (%d) not aligned\n",
4891 bp->dev->name, bp->gunzip_outlen);
4892 bp->gunzip_outlen >>= 2;
4894 zlib_inflateEnd(bp->strm);
4896 if (rc == Z_STREAM_END)
4897 return 0;
4898 return rc;
4902 /* nic load/unload */
4905 * General service functions
4908 /* send a NIG loopback debug packet */
4909 static void bnx2x_lb_pckt(struct bnx2x *bp)
4911 u32 wb_write[3];
4913 /* Ethernet source and destination addresses */
4914 wb_write[0] = 0x55555555;
4915 wb_write[1] = 0x55555555;
4916 wb_write[2] = 0x20; /* SOP */
4917 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4919 /* NON-IP protocol */
4920 wb_write[0] = 0x09000000;
4921 wb_write[1] = 0x55555555;
4922 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4923 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4926 /* some of the internal memories
4927 * are not directly readable from the driver;
4928 * to test them we send debug packets
4929 */
4930 static int bnx2x_int_mem_test(struct bnx2x *bp)
4932 int factor;
4933 int count, i;
4934 u32 val = 0;
4936 if (CHIP_REV_IS_FPGA(bp))
4937 factor = 120;
4938 else if (CHIP_REV_IS_EMUL(bp))
4939 factor = 200;
4940 else
4941 factor = 1;
4943 DP(NETIF_MSG_HW, "start part1\n");
4945 /* Disable inputs of parser neighbor blocks */
4946 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4947 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4948 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4949 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4951 /* Write 0 to parser credits for CFC search request */
4952 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4954 /* send Ethernet packet */
4955 bnx2x_lb_pckt(bp);
4957 /* TODO: do I need to reset the NIG statistics? */
4958 /* Wait until NIG register shows 1 packet of size 0x10 */
4959 count = 1000 * factor;
4962 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4963 val = *bnx2x_sp(bp, wb_data[0]);
4971 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4975 /* Wait until PRS register shows 1 packet */
4976 count = 1000 * factor;
4978 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4986 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4990 /* Reset and init BRB, PRS */
4991 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4992 msleep(50);
4993 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4994 msleep(50);
4995 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4996 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4998 DP(NETIF_MSG_HW, "part2\n");
5000 /* Disable inputs of parser neighbor blocks */
5001 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5002 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5003 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5004 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5006 /* Write 0 to parser credits for CFC search request */
5007 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5009 /* send 10 Ethernet packets */
5010 for (i = 0; i < 10; i++)
5011 bnx2x_lb_pckt(bp);
5013 /* Wait until NIG register shows 10 + 1
5014 packets of size 11*0x10 = 0xb0 */
5015 count = 1000 * factor;
5018 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5019 val = *bnx2x_sp(bp, wb_data[0]);
5027 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5031 /* Wait until PRS register shows 2 packets */
5032 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5034 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5036 /* Write 1 to parser credits for CFC search request */
5037 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5039 /* Wait until PRS register shows 3 packets */
5040 msleep(10 * factor);
5041 /* Wait until NIG register shows 1 packet of size 0x10 */
5042 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5044 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5046 /* clear NIG EOP FIFO */
5047 for (i = 0; i < 11; i++)
5048 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5049 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5050 if (val != 1) {
5051 BNX2X_ERR("clear of NIG failed\n");
5052 return -2;
5053 }
5055 /* Reset and init BRB, PRS, NIG */
5056 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5057 msleep(50);
5058 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5059 msleep(50);
5060 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5061 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5064 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5067 /* Enable inputs of parser neighbor blocks */
5068 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5069 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5070 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5071 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5073 DP(NETIF_MSG_HW, "done\n");
5078 static void enable_blocks_attention(struct bnx2x *bp)
5080 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5081 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5082 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5083 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5084 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5085 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5086 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5087 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5088 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5089 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5090 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5091 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5092 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5093 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5094 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5095 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5096 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5097 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5098 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5099 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5100 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5101 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5102 if (CHIP_REV_IS_FPGA(bp))
5103 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5105 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5106 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5107 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5108 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5109 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5110 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5111 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5112 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5113 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5114 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5118 static int bnx2x_init_common(struct bnx2x *bp)
5120 u32 val, i;
5122 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5124 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5125 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5127 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5128 if (CHIP_IS_E1H(bp))
5129 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5131 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5132 msleep(30);
5133 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5135 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5136 if (CHIP_IS_E1(bp)) {
5137 /* enable HW interrupt from PXP on USDM overflow
5138 bit 16 on INT_MASK_0 */
5139 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5142 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5144 #ifdef __BIG_ENDIAN
5146 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5147 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5148 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5149 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5150 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5151 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5153 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5154 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5155 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5156 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5157 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5158 #endif
5160 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5162 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5163 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5164 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5167 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5168 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5170 /* let the HW do its magic ... */
5171 msleep(100);
5172 /* finish PXP init */
5173 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5174 if (val != 1) {
5175 BNX2X_ERR("PXP2 CFG failed\n");
5176 return -EBUSY;
5177 }
5178 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5179 if (val != 1) {
5180 BNX2X_ERR("PXP2 RD_INIT failed\n");
5181 return -EBUSY;
5182 }
5184 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5185 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5187 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5189 /* clean the DMAE memory */
5190 bp->dmae_ready = 1;
5191 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5193 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5194 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5195 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5196 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5198 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5199 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5200 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5201 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5203 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5204 /* soft reset pulse */
5205 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5206 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5209 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5212 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5213 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5214 if (!CHIP_REV_IS_SLOW(bp)) {
5215 /* enable hw interrupt from doorbell Q */
5216 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5219 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5220 if (CHIP_REV_IS_SLOW(bp)) {
5221 /* fix for emulation and FPGA for no pause */
5222 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5223 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5224 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5225 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5228 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5230 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5231 if (CHIP_IS_E1H(bp))
5232 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5234 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5235 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5236 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5237 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5239 if (CHIP_IS_E1H(bp)) {
5240 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5241 STORM_INTMEM_SIZE_E1H/2);
5242 bnx2x_init_fill(bp,
5243 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5244 0, STORM_INTMEM_SIZE_E1H/2);
5245 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5246 STORM_INTMEM_SIZE_E1H/2);
5247 bnx2x_init_fill(bp,
5248 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5249 0, STORM_INTMEM_SIZE_E1H/2);
5250 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1H/2);
5252 bnx2x_init_fill(bp,
5253 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5254 0, STORM_INTMEM_SIZE_E1H/2);
5255 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5256 STORM_INTMEM_SIZE_E1H/2);
5257 bnx2x_init_fill(bp,
5258 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5259 0, STORM_INTMEM_SIZE_E1H/2);
5260 } else { /* E1 */
5261 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5262 STORM_INTMEM_SIZE_E1);
5263 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5264 STORM_INTMEM_SIZE_E1);
5265 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5266 STORM_INTMEM_SIZE_E1);
5267 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5268 STORM_INTMEM_SIZE_E1);
5269 }
5271 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5272 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5273 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5274 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5276 /* sync semi rtc */
5277 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5278 0x80000000);
5279 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5280 0x80000000);
5282 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5283 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5284 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5286 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5287 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5288 REG_WR(bp, i, 0xc0cac01a);
5289 /* TODO: replace with something meaningful */
5291 if (CHIP_IS_E1H(bp))
5292 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5293 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5295 if (sizeof(union cdu_context) != 1024)
5296 /* we currently assume that a context is 1024 bytes */
5297 printk(KERN_ALERT PFX "please adjust the size of"
5298 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5300 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5301 val = (4 << 24) + (0 << 12) + 1024;
5302 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5303 if (CHIP_IS_E1(bp)) {
5304 /* !!! fix pxp client credit until excel update */
5305 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5306 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5309 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5310 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5312 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5313 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5315 /* PXPCS COMMON comes here */
5316 /* Reset PCIE errors for debug */
5317 REG_WR(bp, 0x2814, 0xffffffff);
5318 REG_WR(bp, 0x3820, 0xffffffff);
5320 /* EMAC0 COMMON comes here */
5321 /* EMAC1 COMMON comes here */
5322 /* DBU COMMON comes here */
5323 /* DBG COMMON comes here */
5325 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5326 if (CHIP_IS_E1H(bp)) {
5327 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5328 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5331 if (CHIP_REV_IS_SLOW(bp))
5332 msleep(200);
5334 /* finish CFC init */
5335 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5336 if (val != 1) {
5337 BNX2X_ERR("CFC LL_INIT failed\n");
5338 return -EBUSY;
5339 }
5340 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5341 if (val != 1) {
5342 BNX2X_ERR("CFC AC_INIT failed\n");
5343 return -EBUSY;
5344 }
5345 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5346 if (val != 1) {
5347 BNX2X_ERR("CFC CAM_INIT failed\n");
5348 return -EBUSY;
5349 }
5350 REG_WR(bp, CFC_REG_DEBUG0, 0);
5352 /* read NIG statistic
5353 to see if this is our first up since powerup */
5354 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5355 val = *bnx2x_sp(bp, wb_data[0]);
5357 /* do internal memory self test */
5358 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5359 BNX2X_ERR("internal mem self test failed\n");
5363 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5364 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5365 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5366 /* Fan failure is indicated by SPIO 5 */
5367 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5368 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5370 /* set to active low mode */
5371 val = REG_RD(bp, MISC_REG_SPIO_INT);
5372 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5373 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5374 REG_WR(bp, MISC_REG_SPIO_INT, val);
5376 /* enable interrupt to signal the IGU */
5377 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5378 val |= (1 << MISC_REGISTERS_SPIO_5);
5379 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5380 break;
5382 default:
5383 break;
5384 }
5386 /* clear PXP2 attentions */
5387 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5389 enable_blocks_attention(bp);
5391 if (!BP_NOMCP(bp)) {
5392 bnx2x_acquire_phy_lock(bp);
5393 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5394 bnx2x_release_phy_lock(bp);
5396 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5401 static int bnx2x_init_port(struct bnx2x *bp)
5403 int port = BP_PORT(bp);
5406 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5408 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5410 /* Port PXP comes here */
5411 /* Port PXP2 comes here */
5416 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5417 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5418 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5419 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5424 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5425 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5426 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5427 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5432 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5433 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5434 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5435 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5437 /* Port CMs come here */
5439 /* Port QM comes here */
5441 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5442 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5444 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5445 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5447 /* Port DQ comes here */
5448 /* Port BRB1 comes here */
5449 /* Port PRS comes here */
5450 /* Port TSDM comes here */
5451 /* Port CSDM comes here */
5452 /* Port USDM comes here */
5453 /* Port XSDM comes here */
5454 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5455 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5456 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5457 port ? USEM_PORT1_END : USEM_PORT0_END);
5458 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5459 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5460 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5461 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5462 /* Port UPB comes here */
5463 /* Port XPB comes here */
5465 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5466 port ? PBF_PORT1_END : PBF_PORT0_END);
5468 /* configure PBF to work without PAUSE mtu 9000 */
5469 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5471 /* update threshold */
5472 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5473 /* update init credit */
5474 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5477 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5479 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5482 /* tell the searcher where the T2 table is */
5483 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5485 wb_write[0] = U64_LO(bp->t2_mapping);
5486 wb_write[1] = U64_HI(bp->t2_mapping);
5487 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5488 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5489 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5490 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5492 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5493 /* Port SRCH comes here */
5495 /* Port CDU comes here */
5496 /* Port CFC comes here */
5498 if (CHIP_IS_E1(bp)) {
5499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5500 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5502 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5503 port ? HC_PORT1_END : HC_PORT0_END);
5505 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5506 MISC_AEU_PORT0_START,
5507 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5508 /* init aeu_mask_attn_func_0/1:
5509 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5510 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5511 * bits 4-7 are used for "per vn group attention" */
5512 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5513 (IS_E1HMF(bp) ? 0xF7 : 0x7));
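/* Illustrative bit patterns (added): 0x7 = bits 0-2 only (SF mode);
 * 0xF7 = bits 0-2 plus bits 4-7, with bit 3 still masked (MF mode,
 * per-vn group attentions enabled). */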
5515 /* Port PXPCS comes here */
5516 /* Port EMAC0 comes here */
5517 /* Port EMAC1 comes here */
5518 /* Port DBU comes here */
5519 /* Port DBG comes here */
5520 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5521 port ? NIG_PORT1_END : NIG_PORT0_END);
5523 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5525 if (CHIP_IS_E1H(bp)) {
5527 struct cmng_struct_per_port m_cmng_port;
5528 u32 wsum;
5529 int vn;
5530 /* 0x2 disable e1hov, 0x1 enable */
5531 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5532 (IS_E1HMF(bp) ? 0x1 : 0x2));
5534 /* Init RATE SHAPING and FAIRNESS contexts.
5535 Initialize as if there is 10G link. */
5536 wsum = bnx2x_calc_vn_wsum(bp);
5537 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5539 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5540 bnx2x_init_vn_minmax(bp, 2*vn + port,
5541 wsum, 10000, &m_cmng_port);
5544 /* Port MCP comes here */
5545 /* Port DMAE comes here */
5547 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5548 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5549 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5550 /* add SPIO 5 to group 0 */
5551 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5552 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5553 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5554 break;
5556 default:
5557 break;
5558 }
5560 bnx2x__link_reset(bp);
5562 return 0;
5565 #define ILT_PER_FUNC (768/2)
5566 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5567 /* the phys address is shifted right 12 bits and has a
5568 1=valid bit added at the 53rd bit;
5569 since this is a wide register(TM)
5570 we split it into two 32 bit writes
5571 */
5572 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5573 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5574 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5575 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
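/*
 * Worked example (illustrative, added): for a page-aligned DMA address
 * 0x1234567000, ONCHIP_ADDR1() yields 0x01234567 (address bits 12..43)
 * and ONCHIP_ADDR2() yields 0x00100000 (address bits 44..63 plus the
 * valid bit at bit 20 of the high word, i.e. bit 52 of the 64-bit ILT
 * entry). PXP_ONE_ILT(x) encodes the same line as both first and last
 * of a one-line range.
 */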
5577 #define CNIC_ILT_LINES 0
5579 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5581 u32 reg;
5583 if (CHIP_IS_E1H(bp))
5584 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5586 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5588 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5591 static int bnx2x_init_func(struct bnx2x *bp)
5593 int port = BP_PORT(bp);
5594 int func = BP_FUNC(bp);
5595 int i;
5597 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5599 i = FUNC_ILT_BASE(func);
5601 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5602 if (CHIP_IS_E1H(bp)) {
5603 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5604 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5606 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5607 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5610 if (CHIP_IS_E1H(bp)) {
5611 for (i = 0; i < 9; i++)
5612 bnx2x_init_block(bp,
5613 cm_start[func][i], cm_end[func][i]);
5615 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5616 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5619 /* HC init per function */
5620 if (CHIP_IS_E1H(bp)) {
5621 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5623 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5624 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5626 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5628 if (CHIP_IS_E1H(bp))
5629 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5631 /* Reset PCIE errors for debug */
5632 REG_WR(bp, 0x2114, 0xffffffff);
5633 REG_WR(bp, 0x2120, 0xffffffff);
5635 return 0;
5638 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5640 int i, rc = 0;
5642 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5643 BP_FUNC(bp), load_code);
5646 mutex_init(&bp->dmae_mutex);
5647 bnx2x_gunzip_init(bp);
5649 switch (load_code) {
5650 case FW_MSG_CODE_DRV_LOAD_COMMON:
5651 rc = bnx2x_init_common(bp);
5652 if (rc)
5653 goto init_hw_err;
5654 /* no break */
5656 case FW_MSG_CODE_DRV_LOAD_PORT:
5657 bp->dmae_ready = 1;
5658 rc = bnx2x_init_port(bp);
5659 if (rc)
5660 goto init_hw_err;
5661 /* no break */
5663 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5664 bp->dmae_ready = 1;
5665 rc = bnx2x_init_func(bp);
5666 if (rc)
5667 goto init_hw_err;
5668 break;
5671 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5675 if (!BP_NOMCP(bp)) {
5676 int func = BP_FUNC(bp);
5678 bp->fw_drv_pulse_wr_seq =
5679 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5680 DRV_PULSE_SEQ_MASK);
5681 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5682 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5683 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5687 /* this needs to be done before gunzip end */
5688 bnx2x_zero_def_sb(bp);
5689 for_each_queue(bp, i)
5690 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5692 init_hw_err:
5693 bnx2x_gunzip_end(bp);
5695 return rc;
5698 /* send the MCP a request, block until there is a reply */
5699 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5701 int func = BP_FUNC(bp);
5702 u32 seq = ++bp->fw_seq;
5703 u32 rc = 0;
5704 u32 cnt = 1;
5705 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5707 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5708 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5710 do {
5711 /* let the FW do its magic ... */
5712 msleep(delay);
5714 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5716 /* Give the FW up to 2 seconds (200*10ms) */
5717 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5719 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5720 cnt*delay, rc, seq);
5722 /* is this a reply to our command? */
5723 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5724 rc &= FW_MSG_CODE_MASK;
5728 BNX2X_ERR("FW failed to respond!\n");
5736 static void bnx2x_free_mem(struct bnx2x *bp)
5739 #define BNX2X_PCI_FREE(x, y, size) \
5742 pci_free_consistent(bp->pdev, size, x, y); \
5748 #define BNX2X_FREE(x) \
5759 for_each_queue(bp, i) {
5762 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5763 bnx2x_fp(bp, i, status_blk_mapping),
5764 sizeof(struct host_status_block) +
5765 sizeof(struct eth_tx_db_data));
5767 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5768 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5769 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5770 bnx2x_fp(bp, i, tx_desc_mapping),
5771 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5773 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5774 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5775 bnx2x_fp(bp, i, rx_desc_mapping),
5776 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5778 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5779 bnx2x_fp(bp, i, rx_comp_mapping),
5780 sizeof(struct eth_fast_path_rx_cqe) *
5784 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5785 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5786 bnx2x_fp(bp, i, rx_sge_mapping),
5787 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5789 /* end of fastpath */
5791 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5792 sizeof(struct host_def_status_block));
5794 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5795 sizeof(struct bnx2x_slowpath));
5798 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5799 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5800 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5801 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5803 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5805 #undef BNX2X_PCI_FREE
5809 static int bnx2x_alloc_mem(struct bnx2x *bp)
5812 #define BNX2X_PCI_ALLOC(x, y, size) \
5813 do { \
5814 x = pci_alloc_consistent(bp->pdev, size, y); \
5815 if (x == NULL) \
5816 goto alloc_mem_err; \
5817 memset(x, 0, size); \
5818 } while (0)
5820 #define BNX2X_ALLOC(x, size) \
5821 do { \
5822 x = vmalloc(size); \
5823 if (x == NULL) \
5824 goto alloc_mem_err; \
5825 memset(x, 0, size); \
5826 } while (0)
5828 int i;
5831 for_each_queue(bp, i) {
5832 bnx2x_fp(bp, i, bp) = bp;
5835 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5836 &bnx2x_fp(bp, i, status_blk_mapping),
5837 sizeof(struct host_status_block) +
5838 sizeof(struct eth_tx_db_data));
5840 bnx2x_fp(bp, i, hw_tx_prods) =
5841 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5843 bnx2x_fp(bp, i, tx_prods_mapping) =
5844 bnx2x_fp(bp, i, status_blk_mapping) +
5845 sizeof(struct host_status_block);
5847 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5848 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5849 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5850 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5851 &bnx2x_fp(bp, i, tx_desc_mapping),
5852 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5854 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5855 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5856 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5857 &bnx2x_fp(bp, i, rx_desc_mapping),
5858 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5860 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5861 &bnx2x_fp(bp, i, rx_comp_mapping),
5862 sizeof(struct eth_fast_path_rx_cqe) *
5866 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5867 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5868 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5869 &bnx2x_fp(bp, i, rx_sge_mapping),
5870 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5872 /* end of fastpath */
5874 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5875 sizeof(struct host_def_status_block));
5877 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5878 sizeof(struct bnx2x_slowpath));
5881 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5884 for (i = 0; i < 64*1024; i += 64) {
5885 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5886 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5889 /* allocate searcher T2 table
5890 we allocate 1/4 of alloc num for T2
5891 (which is not entered into the ILT) */
5892 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5895 for (i = 0; i < 16*1024; i += 64)
5896 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5898 /* now fixup the last line in the block to point to the next block */
5899 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
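/*
 * Note (added for clarity): each 64-byte T2 entry stores the physical
 * address of the next entry in its last 8 bytes, so the loop above
 * threads all 16K/64 = 256 entries into a singly linked free list and
 * this final store wraps the last entry back to the start of the block.
 */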
5901 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5902 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5904 /* QM queues (128*MAX_CONN) */
5905 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5908 /* Slow path ring */
5909 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5911 return 0;
5913 alloc_mem_err:
5914 bnx2x_free_mem(bp);
5915 return -ENOMEM;
5917 #undef BNX2X_PCI_ALLOC
5921 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5923 int i;
5925 for_each_queue(bp, i) {
5926 struct bnx2x_fastpath *fp = &bp->fp[i];
5928 u16 bd_cons = fp->tx_bd_cons;
5929 u16 sw_prod = fp->tx_pkt_prod;
5930 u16 sw_cons = fp->tx_pkt_cons;
5932 while (sw_cons != sw_prod) {
5933 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5934 sw_cons++;
5939 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5941 int i, j;
5943 for_each_queue(bp, j) {
5944 struct bnx2x_fastpath *fp = &bp->fp[j];
5946 for (i = 0; i < NUM_RX_BD; i++) {
5947 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5948 struct sk_buff *skb = rx_buf->skb;
5950 if (skb == NULL)
5951 continue;
5953 pci_unmap_single(bp->pdev,
5954 pci_unmap_addr(rx_buf, mapping),
5955 bp->rx_buf_size,
5956 PCI_DMA_FROMDEVICE);
5958 rx_buf->skb = NULL;
5959 dev_kfree_skb(skb);
5961 if (!fp->disable_tpa)
5962 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5963 ETH_MAX_AGGREGATION_QUEUES_E1 :
5964 ETH_MAX_AGGREGATION_QUEUES_E1H);
5968 static void bnx2x_free_skbs(struct bnx2x *bp)
5970 bnx2x_free_tx_skbs(bp);
5971 bnx2x_free_rx_skbs(bp);
5974 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5976 int i, offset = 1;
5978 free_irq(bp->msix_table[0].vector, bp->dev);
5979 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5980 bp->msix_table[0].vector);
5982 for_each_queue(bp, i) {
5983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5984 "state %x\n", i, bp->msix_table[i + offset].vector,
5985 bnx2x_fp(bp, i, state));
5987 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5988 BNX2X_ERR("IRQ of fp #%d being freed while "
5989 "state != closed\n", i);
5991 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5995 static void bnx2x_free_irq(struct bnx2x *bp)
5997 if (bp->flags & USING_MSIX_FLAG) {
5998 bnx2x_free_msix_irqs(bp);
5999 pci_disable_msix(bp->pdev);
6000 bp->flags &= ~USING_MSIX_FLAG;
6003 free_irq(bp->pdev->irq, bp->dev);
6006 static int bnx2x_enable_msix(struct bnx2x *bp)
6008 int i, rc, offset = 0;
6010 bp->msix_table[0].entry = 0;
6011 offset = 1;
6012 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6014 for_each_queue(bp, i) {
6015 int igu_vec = offset + i + BP_L_ID(bp);
6017 bp->msix_table[i + offset].entry = igu_vec;
6018 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6019 "(fastpath #%u)\n", i + offset, igu_vec, i);
6022 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6023 bp->num_queues + offset);
6025 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6028 bp->flags |= USING_MSIX_FLAG;
6030 return 0;
6033 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6035 int i, rc, offset = 1;
6037 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6038 bp->dev->name, bp->dev);
6040 BNX2X_ERR("request sp irq failed\n");
6044 for_each_queue(bp, i) {
6045 rc = request_irq(bp->msix_table[i + offset].vector,
6046 bnx2x_msix_fp_int, 0,
6047 bp->dev->name, &bp->fp[i]);
6049 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6051 bnx2x_free_msix_irqs(bp);
6055 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6056 }
6058 return 0;
6061 static int bnx2x_req_irq(struct bnx2x *bp)
6065 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6066 bp->dev->name, bp->dev);
6067 if (!rc)
6068 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6070 return rc;
6073 static void bnx2x_napi_enable(struct bnx2x *bp)
6075 int i;
6077 for_each_queue(bp, i)
6078 napi_enable(&bnx2x_fp(bp, i, napi));
6081 static void bnx2x_napi_disable(struct bnx2x *bp)
6083 int i;
6085 for_each_queue(bp, i)
6086 napi_disable(&bnx2x_fp(bp, i, napi));
6089 static void bnx2x_netif_start(struct bnx2x *bp)
6091 if (atomic_dec_and_test(&bp->intr_sem)) {
6092 if (netif_running(bp->dev)) {
6093 if (bp->state == BNX2X_STATE_OPEN)
6094 netif_wake_queue(bp->dev);
6095 bnx2x_napi_enable(bp);
6096 bnx2x_int_enable(bp);
6101 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6103 bnx2x_int_disable_sync(bp, disable_hw);
6104 if (netif_running(bp->dev)) {
6105 bnx2x_napi_disable(bp);
6106 netif_tx_disable(bp->dev);
6107 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6112 * Init service functions
6115 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6117 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6118 int port = BP_PORT(bp);
6120 /* CAM allocation
6121 * unicasts 0-31:port0 32-63:port1
6122 * multicast 64-127:port0 128-191:port1
6123 */
6124 config->hdr.length_6b = 2;
6125 config->hdr.offset = port ? 31 : 0;
6126 config->hdr.client_id = BP_CL_ID(bp);
6127 config->hdr.reserved1 = 0;
6130 config->config_table[0].cam_entry.msb_mac_addr =
6131 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6132 config->config_table[0].cam_entry.middle_mac_addr =
6133 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6134 config->config_table[0].cam_entry.lsb_mac_addr =
6135 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6136 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6137 if (set)
6138 config->config_table[0].target_table_entry.flags = 0;
6139 else
6140 CAM_INVALIDATE(config->config_table[0]);
6141 config->config_table[0].target_table_entry.client_id = 0;
6142 config->config_table[0].target_table_entry.vlan_id = 0;
6144 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6145 (set ? "setting" : "clearing"),
6146 config->config_table[0].cam_entry.msb_mac_addr,
6147 config->config_table[0].cam_entry.middle_mac_addr,
6148 config->config_table[0].cam_entry.lsb_mac_addr);
6151 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6152 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6153 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6154 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6155 if (set)
6156 config->config_table[1].target_table_entry.flags =
6157 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6158 else
6159 CAM_INVALIDATE(config->config_table[1]);
6160 config->config_table[1].target_table_entry.client_id = 0;
6161 config->config_table[1].target_table_entry.vlan_id = 0;
6163 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6164 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6165 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6168 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6170 struct mac_configuration_cmd_e1h *config =
6171 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6173 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6174 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6178 /* CAM allocation for E1H
6179 * unicasts: by func number
6180 * multicast: 20+FUNC*20, 20 each
6182 config->hdr.length_6b = 1;
6183 config->hdr.offset = BP_FUNC(bp);
6184 config->hdr.client_id = BP_CL_ID(bp);
6185 config->hdr.reserved1 = 0;
6188 config->config_table[0].msb_mac_addr =
6189 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6190 config->config_table[0].middle_mac_addr =
6191 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6192 config->config_table[0].lsb_mac_addr =
6193 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6194 config->config_table[0].client_id = BP_L_ID(bp);
6195 config->config_table[0].vlan_id = 0;
6196 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6197 if (set)
6198 config->config_table[0].flags = BP_PORT(bp);
6199 else
6200 config->config_table[0].flags =
6201 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6203 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6204 (set ? "setting" : "clearing"),
6205 config->config_table[0].msb_mac_addr,
6206 config->config_table[0].middle_mac_addr,
6207 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6209 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6210 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6211 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6214 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6215 int *state_p, int poll)
6217 /* can take a while if any port is running */
6218 int cnt = 500;
6220 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6221 poll ? "polling" : "waiting", state, idx);
6226 bnx2x_rx_int(bp->fp, 10);
6227 /* if index is different from 0
6228 * the reply for some commands will
6229 * be on the non default queue
6230 */
6231 if (idx)
6232 bnx2x_rx_int(&bp->fp[idx], 10);
6233 }
6235 mb(); /* state is changed by bnx2x_sp_event() */
6236 if (*state_p == state)
6237 return 0;
6239 msleep(1);
6240 }
6242 /* timeout! */
6243 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6244 poll ? "polling" : "waiting", state, idx);
6245 #ifdef BNX2X_STOP_ON_ERROR
6246 bnx2x_panic();
6247 #endif
6249 return -EBUSY;
6252 static int bnx2x_setup_leading(struct bnx2x *bp)
6256 /* reset IGU state */
6257 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6260 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6262 /* Wait for completion */
6263 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6265 return rc;
6268 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6270 /* reset IGU state */
6271 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6274 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6275 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6277 /* Wait for completion */
6278 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6279 &(bp->fp[index].state), 0);
6282 static int bnx2x_poll(struct napi_struct *napi, int budget);
6283 static void bnx2x_set_rx_mode(struct net_device *dev);
6285 /* must be called with rtnl_lock */
6286 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6288 u32 load_code;
6289 int i, rc;
6290 #ifdef BNX2X_STOP_ON_ERROR
6291 if (unlikely(bp->panic))
6292 return -EPERM;
6293 #endif
6295 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6297 /* Send LOAD_REQUEST command to MCP
6298 Returns the type of LOAD command:
6299 if it is the first port to be initialized
6300 common blocks should be initialized, otherwise - not
6302 if (!BP_NOMCP(bp)) {
6303 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6305 BNX2X_ERR("MCP response failure, aborting\n");
6308 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6309 return -EBUSY; /* other port in diagnostic mode */
6311 } else {
6312 int port = BP_PORT(bp);
6314 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6315 load_count[0], load_count[1], load_count[2]);
6316 load_count[0]++;
6317 load_count[1 + port]++;
6318 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6319 load_count[0], load_count[1], load_count[2]);
6320 if (load_count[0] == 1)
6321 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6322 else if (load_count[1 + port] == 1)
6323 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6324 else
6325 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6326 }
6328 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6329 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6333 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6335 /* if we can't use MSI-X we only need one fp,
6336 * so try to enable MSI-X with the requested number of fp's
6337 * and fallback to inta with one fp
6343 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6344 /* user requested number */
6345 bp->num_queues = use_multi;
6347 else
6348 bp->num_queues = min_t(u32, num_online_cpus(),
6349 BP_MAX_QUEUES(bp));
6353 if (bnx2x_enable_msix(bp)) {
6354 /* failed to enable MSI-X */
6355 bp->num_queues = 1;
6356 if (use_multi)
6357 BNX2X_ERR("Multi requested but failed"
6358 " to enable MSI-X\n");
6362 "set number of queues to %d\n", bp->num_queues);
6364 if (bnx2x_alloc_mem(bp))
6365 return -ENOMEM;
6367 for_each_queue(bp, i)
6368 bnx2x_fp(bp, i, disable_tpa) =
6369 ((bp->flags & TPA_ENABLE_FLAG) == 0);
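/* each fastpath caches the TPA verdict so the Rx path can test a
 * per-queue flag instead of the global flags word */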
6371 if (bp->flags & USING_MSIX_FLAG) {
6372 rc = bnx2x_req_msix_irqs(bp);
6373 if (rc) {
6374 pci_disable_msix(bp->pdev);
6375 goto load_error;
6376 }
6377 } else {
6379 rc = bnx2x_req_irq(bp);
6381 BNX2X_ERR("IRQ request failed, aborting\n");
6386 for_each_queue(bp, i)
6387 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6388 bnx2x_poll, 128);
6391 rc = bnx2x_init_hw(bp, load_code);
6393 BNX2X_ERR("HW init failed, aborting\n");
6394 goto load_int_disable;
6397 /* Setup NIC internals and enable interrupts */
6398 bnx2x_nic_init(bp, load_code);
6400 /* Send LOAD_DONE command to MCP */
6401 if (!BP_NOMCP(bp)) {
6402 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6404 BNX2X_ERR("MCP response failure, aborting\n");
6406 goto load_rings_free;
6410 bnx2x_stats_init(bp);
6412 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6414 /* Enable Rx interrupt handling before sending the ramrod
6415 as it's completed on Rx FP queue */
6416 bnx2x_napi_enable(bp);
6418 /* Enable interrupt handling */
6419 atomic_set(&bp->intr_sem, 0);
6421 rc = bnx2x_setup_leading(bp);
6423 BNX2X_ERR("Setup leading failed!\n");
6424 goto load_netif_stop;
6427 if (CHIP_IS_E1H(bp))
6428 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6429 BNX2X_ERR("!!! mf_cfg function disabled\n");
6430 bp->state = BNX2X_STATE_DISABLED;
6431 }
6433 if (bp->state == BNX2X_STATE_OPEN)
6434 for_each_nondefault_queue(bp, i) {
6435 rc = bnx2x_setup_multi(bp, i);
6436 if (rc)
6437 goto load_netif_stop;
6438 }
6440 if (CHIP_IS_E1(bp))
6441 bnx2x_set_mac_addr_e1(bp, 1);
6442 else
6443 bnx2x_set_mac_addr_e1h(bp, 1);
6445 if (bp->port.pmf)
6446 bnx2x_initial_phy_init(bp);
6448 /* Start fast path */
6449 switch (load_mode) {
6450 case LOAD_NORMAL:
6451 /* Tx queue should be only reenabled */
6452 netif_wake_queue(bp->dev);
6453 bnx2x_set_rx_mode(bp->dev);
6454 break;
6456 case LOAD_OPEN:
6457 netif_start_queue(bp->dev);
6458 bnx2x_set_rx_mode(bp->dev);
6459 if (bp->flags & USING_MSIX_FLAG)
6460 printk(KERN_INFO PFX "%s: using MSI-X\n",
6461 bp->dev->name);
6462 break;
6464 case LOAD_DIAG:
6465 bnx2x_set_rx_mode(bp->dev);
6466 bp->state = BNX2X_STATE_DIAG;
6467 break;
6469 default:
6470 break;
6471 }
6473 if (!bp->port.pmf)
6474 bnx2x__link_status_update(bp);
6476 /* start the timer */
6477 mod_timer(&bp->timer, jiffies + bp->current_interval);
6479 return 0;
6482 load_netif_stop:
6483 bnx2x_napi_disable(bp);
6484 load_rings_free:
6485 /* Free SKBs, SGEs, TPA pool and driver internals */
6486 bnx2x_free_skbs(bp);
6487 for_each_queue(bp, i)
6488 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6489 load_int_disable:
6490 bnx2x_int_disable_sync(bp, 1);
6494 load_error:
6495 bnx2x_free_mem(bp);
6497 /* TBD we really need to reset the chip
6498 if we want to recover from this */
6499 return rc;
6500 }
6502 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6503 {
6504 int rc;
6506 /* halt the connection */
6507 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6508 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6510 /* Wait for completion */
6511 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6512 &(bp->fp[index].state), 1);
6513 if (rc) /* timeout */
6514 return rc;
6516 /* delete cfc entry */
6517 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6519 /* Wait for completion */
6520 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6521 &(bp->fp[index].state), 1);
6523 return rc;
6524 }
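/* The leading connection additionally owns the port: it is halted on
 * the default status block and then removed with a PORT_DEL ramrod,
 * whose completion is detected by watching the default-status-block
 * producer advance.
 */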
6525 static int bnx2x_stop_leading(struct bnx2x *bp)
6526 {
6527 u16 dsb_sp_prod_idx;
6528 /* if the other port is handling traffic,
6529 this can take a lot of time */
6535 /* Send HALT ramrod */
6536 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6537 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6539 /* Wait for completion */
6540 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6541 &(bp->fp[0].state), 1);
6542 if (rc) /* timeout */
6543 return rc;
6545 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6547 /* Send PORT_DELETE ramrod */
6548 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6550 /* Wait for completion to arrive on default status block
6551 we are going to reset the chip anyway
6552 so there is not much to do if this times out
6553 */
6554 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6556 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6557 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6558 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6559 #ifdef BNX2X_STOP_ON_ERROR
6560 bnx2x_panic();
6561 #endif
6569 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6570 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6572 return rc;
6573 }
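/* The reset helpers below mirror the three load scopes: FUNCTION only,
 * PORT (which also resets the function) and COMMON (which resets
 * everything); bnx2x_reset_chip() picks the set matching the MCP
 * reset_code.
 */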
6575 static void bnx2x_reset_func(struct bnx2x *bp)
6576 {
6577 int port = BP_PORT(bp);
6578 int func = BP_FUNC(bp);
6579 int base, i;
6582 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6583 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6585 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6588 base = FUNC_ILT_BASE(func);
6589 for (i = base; i < base + ILT_PER_FUNC; i++)
6590 bnx2x_ilt_wr(bp, i, 0);
6591 }
6593 static void bnx2x_reset_port(struct bnx2x *bp)
6594 {
6595 int port = BP_PORT(bp);
6596 u32 val;
6598 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6600 /* Do not rcv packets to BRB */
6601 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6602 /* Do not direct rcv packets that are not for MCP to the BRB */
6603 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6604 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6607 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6610 /* Check for BRB port occupancy */
6611 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6612 if (val)
6613 DP(NETIF_MSG_IFDOWN,
6614 "BRB1 is not empty %d blocks are occupied\n", val);
6616 /* TODO: Close Doorbell port? */
6617 }
6619 static void bnx2x_reset_common(struct bnx2x *bp)
6622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6623 0xd3ffff7f);
6624 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6625 }
6627 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6628 {
6629 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6630 BP_FUNC(bp), reset_code);
6632 switch (reset_code) {
6633 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6634 bnx2x_reset_port(bp);
6635 bnx2x_reset_func(bp);
6636 bnx2x_reset_common(bp);
6637 break;
6639 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6640 bnx2x_reset_port(bp);
6641 bnx2x_reset_func(bp);
6642 break;
6644 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6645 bnx2x_reset_func(bp);
6646 break;
6648 default:
6649 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6650 }
6651 }
6654 /* must be called with rtnl_lock */
6655 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6656 {
6657 int port = BP_PORT(bp);
6658 u32 reset_code = 0;
6659 int i, cnt, rc = 0;
6661 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6663 bp->rx_mode = BNX2X_RX_MODE_NONE;
6664 bnx2x_set_storm_rx_mode(bp);
6666 bnx2x_netif_stop(bp, 1);
6667 if (!netif_running(bp->dev))
6668 bnx2x_napi_disable(bp);
6669 del_timer_sync(&bp->timer);
6670 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6671 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6672 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6674 /* Wait until tx fast path tasks complete */
6675 for_each_queue(bp, i) {
6676 struct bnx2x_fastpath *fp = &bp->fp[i];
6680 while (BNX2X_HAS_TX_WORK(fp)) {
6682 bnx2x_tx_int(fp, 1000);
6684 BNX2X_ERR("timeout waiting for queue[%d]\n",
6686 #ifdef BNX2X_STOP_ON_ERROR
6698 /* Give HW time to discard old tx messages */
6699 msleep(1);
6704 if (CHIP_IS_E1(bp)) {
6705 struct mac_configuration_cmd *config =
6706 bnx2x_sp(bp, mcast_config);
6708 bnx2x_set_mac_addr_e1(bp, 0);
6710 for (i = 0; i < config->hdr.length_6b; i++)
6711 CAM_INVALIDATE(config->config_table[i]);
6713 config->hdr.length_6b = i;
6714 if (CHIP_REV_IS_SLOW(bp))
6715 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6717 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6718 config->hdr.client_id = BP_CL_ID(bp);
6719 config->hdr.reserved1 = 0;
6721 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6722 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6723 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6725 } else { /* E1H */
6726 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6728 bnx2x_set_mac_addr_e1h(bp, 0);
6730 for (i = 0; i < MC_HASH_SIZE; i++)
6731 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6732 }
6734 if (unload_mode == UNLOAD_NORMAL)
6735 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6737 else if (bp->flags & NO_WOL_FLAG) {
6738 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6739 if (CHIP_IS_E1H(bp))
6740 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6742 } else if (bp->wol) {
6743 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6744 u8 *mac_addr = bp->dev->dev_addr;
6745 u32 val;
6746 /* The mac address is written to entries 1-4 to
6747 preserve entry 0 which is used by the PMF */
6748 u8 entry = (BP_E1HVN(bp) + 1)*8;
6750 val = (mac_addr[0] << 8) | mac_addr[1];
6751 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6753 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6754 (mac_addr[4] << 8) | mac_addr[5];
6755 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6757 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6759 } else
6760 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6762 /* Close multi and leading connections
6763 Completions for ramrods are collected in a synchronous way */
6764 for_each_nondefault_queue(bp, i)
6765 if (bnx2x_stop_multi(bp, i))
6766 goto unload_error;
6768 rc = bnx2x_stop_leading(bp);
6769 if (rc) {
6770 BNX2X_ERR("Stop leading failed!\n");
6771 #ifdef BNX2X_STOP_ON_ERROR
6778 unload_error:
6779 if (!BP_NOMCP(bp))
6780 reset_code = bnx2x_fw_command(bp, reset_code);
6781 else {
6782 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6783 load_count[0], load_count[1], load_count[2]);
6784 load_count[0]--;
6785 load_count[1 + port]--;
6786 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6787 load_count[0], load_count[1], load_count[2]);
6788 if (load_count[0] == 0)
6789 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6790 else if (load_count[1 + port] == 0)
6791 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6792 else
6793 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6794 }
6796 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6797 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6798 bnx2x__link_reset(bp);
6800 /* Reset the chip */
6801 bnx2x_reset_chip(bp, reset_code);
6803 /* Report UNLOAD_DONE to MCP */
6804 if (!BP_NOMCP(bp))
6805 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6808 /* Free SKBs, SGEs, TPA pool and driver internals */
6809 bnx2x_free_skbs(bp);
6810 for_each_queue(bp, i)
6811 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6812 bnx2x_free_mem(bp);
6814 bp->state = BNX2X_STATE_CLOSED;
6816 netif_carrier_off(bp->dev);
6818 return rc;
6819 }
6821 static void bnx2x_reset_task(struct work_struct *work)
6823 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6825 #ifdef BNX2X_STOP_ON_ERROR
6826 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6827 " so reset not done to allow debug dump,\n"
6828 KERN_ERR " you will need to reboot when done\n");
6829 return;
6830 #endif
6832 rtnl_lock();
6834 if (!netif_running(bp->dev))
6835 goto reset_task_exit;
6837 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6838 bnx2x_nic_load(bp, LOAD_NORMAL);
6840 reset_task_exit:
6841 rtnl_unlock();
6842 }
6844 /* end of nic load/unload */
6849 * Init service functions
6852 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6853 {
6854 u32 val;
6856 /* Check if there is any driver already loaded */
6857 val = REG_RD(bp, MISC_REG_UNPREPARED);
6858 if (val == 0x1) {
6859 /* Check if it is the UNDI driver
6860 * UNDI driver initializes CID offset for normal bell to 0x7
6861 */
6862 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6863 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6864 if (val == 0x7)
6865 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6866 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6868 if (val == 0x7) {
6869 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6870 /* save our func */
6871 int func = BP_FUNC(bp);
6872 u32 swap_en;
6873 u32 swap_val;
6875 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6877 /* try unload UNDI on port 0 */
6878 bp->func = 0;
6879 bp->fw_seq =
6880 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6881 DRV_MSG_SEQ_NUMBER_MASK);
6882 reset_code = bnx2x_fw_command(bp, reset_code);
6884 /* if UNDI is loaded on the other port */
6885 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6887 /* send "DONE" for previous unload */
6888 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6890 /* unload UNDI on port 1 */
6891 bp->func = 1;
6892 bp->fw_seq =
6893 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6894 DRV_MSG_SEQ_NUMBER_MASK);
6895 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6897 bnx2x_fw_command(bp, reset_code);
6900 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6901 HC_REG_CONFIG_0), 0x1000);
6903 /* close input traffic and wait for it */
6904 /* Do not rcv packets to BRB */
6905 REG_WR(bp,
6906 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6907 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6908 /* Do not direct rcv packets that are not for MCP to
6909 the BRB */
6910 REG_WR(bp,
6911 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6912 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6914 REG_WR(bp,
6915 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6916 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6919 /* save NIG port swap info */
6920 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6921 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6923 REG_WR(bp,
6924 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6925 0xd3ffff7f);
6926 REG_WR(bp,
6927 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6928 0x1403);
6929 /* take the NIG out of reset and restore swap values */
6930 REG_WR(bp,
6931 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6932 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6933 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6934 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6936 /* send unload done to the MCP */
6937 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6939 /* restore our func and fw_seq */
6940 bp->func = func;
6941 bp->fw_seq =
6942 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6943 DRV_MSG_SEQ_NUMBER_MASK);
6944 }
6945 }
6946 }
6948 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6949 {
6950 u32 val, val2, val3, val4, id;
6951 u16 pmc;
6953 /* Get the chip revision id and number. */
6954 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6955 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6956 id = ((val & 0xffff) << 16);
6957 val = REG_RD(bp, MISC_REG_CHIP_REV);
6958 id |= ((val & 0xf) << 12);
6959 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6960 id |= ((val & 0xff) << 4);
6961 val = REG_RD(bp, MISC_REG_BOND_ID);
6962 id |= (val & 0xf);
6963 bp->common.chip_id = id;
6964 bp->link_params.chip_id = bp->common.chip_id;
6965 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6967 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6968 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6969 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6970 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6971 bp->common.flash_size, bp->common.flash_size);
6973 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6974 bp->link_params.shmem_base = bp->common.shmem_base;
6975 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6977 if (!bp->common.shmem_base ||
6978 (bp->common.shmem_base < 0xA0000) ||
6979 (bp->common.shmem_base >= 0xC0000)) {
6980 BNX2X_DEV_INFO("MCP not active\n");
6981 bp->flags |= NO_MCP_FLAG;
6982 return;
6983 }
6985 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6986 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6987 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6988 BNX2X_ERR("BAD MCP validity signature\n");
6990 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6991 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6993 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6994 bp->common.hw_config, bp->common.board);
6996 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6997 SHARED_HW_CFG_LED_MODE_MASK) >>
6998 SHARED_HW_CFG_LED_MODE_SHIFT);
7000 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7001 bp->common.bc_ver = val;
7002 BNX2X_DEV_INFO("bc_ver %X\n", val);
7003 if (val < BNX2X_BC_VER) {
7004 /* for now only warn
7005 * later we might need to enforce this */
7006 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7007 " please upgrade BC\n", BNX2X_BC_VER, val);
7010 if (BP_E1HVN(bp) == 0) {
7011 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7012 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7013 } else {
7014 /* no WOL capability for E1HVN != 0 */
7015 bp->flags |= NO_WOL_FLAG;
7016 }
7017 BNX2X_DEV_INFO("%sWoL capable\n",
7018 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7020 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7021 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7022 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7023 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7025 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7026 val, val2, val3, val4);
7027 }
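/* The supported mask is derived in two passes: first from the external
 * PHY type found in NVRAM, then pruned by speed_cap_mask so only the
 * speeds the board is actually wired for remain advertised.
 */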
7029 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7030 u32 switch_cfg)
7031 {
7032 int port = BP_PORT(bp);
7033 u32 ext_phy_type;
7035 switch (switch_cfg) {
7036 case SWITCH_CFG_1G:
7037 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7039 ext_phy_type =
7040 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7041 switch (ext_phy_type) {
7042 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7043 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7046 bp->port.supported |= (SUPPORTED_10baseT_Half |
7047 SUPPORTED_10baseT_Full |
7048 SUPPORTED_100baseT_Half |
7049 SUPPORTED_100baseT_Full |
7050 SUPPORTED_1000baseT_Full |
7051 SUPPORTED_2500baseX_Full |
7056 SUPPORTED_Asym_Pause);
7057 break;
7059 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7060 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7063 bp->port.supported |= (SUPPORTED_10baseT_Half |
7064 SUPPORTED_10baseT_Full |
7065 SUPPORTED_100baseT_Half |
7066 SUPPORTED_100baseT_Full |
7067 SUPPORTED_1000baseT_Full |
7072 SUPPORTED_Asym_Pause);
7073 break;
7075 default:
7076 BNX2X_ERR("NVRAM config error. "
7077 "BAD SerDes ext_phy_config 0x%x\n",
7078 bp->link_params.ext_phy_config);
7079 return;
7080 }
7082 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7083 port*0x10);
7084 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7085 break;
7087 case SWITCH_CFG_10G:
7088 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7090 ext_phy_type =
7091 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7092 switch (ext_phy_type) {
7093 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7094 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7097 bp->port.supported |= (SUPPORTED_10baseT_Half |
7098 SUPPORTED_10baseT_Full |
7099 SUPPORTED_100baseT_Half |
7100 SUPPORTED_100baseT_Full |
7101 SUPPORTED_1000baseT_Full |
7102 SUPPORTED_2500baseX_Full |
7103 SUPPORTED_10000baseT_Full |
7108 SUPPORTED_Asym_Pause);
7109 break;
7111 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7112 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7115 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7118 SUPPORTED_Asym_Pause);
7119 break;
7121 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7122 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7125 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7126 SUPPORTED_1000baseT_Full |
7129 SUPPORTED_Asym_Pause);
7130 break;
7132 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7133 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7136 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7137 SUPPORTED_1000baseT_Full |
7141 SUPPORTED_Asym_Pause);
7142 break;
7144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7145 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7148 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7149 SUPPORTED_2500baseX_Full |
7150 SUPPORTED_1000baseT_Full |
7154 SUPPORTED_Asym_Pause);
7155 break;
7157 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7158 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7161 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7165 SUPPORTED_Asym_Pause);
7166 break;
7168 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7169 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7170 bp->link_params.ext_phy_config);
7171 return;
7173 default:
7174 BNX2X_ERR("NVRAM config error. "
7175 "BAD XGXS ext_phy_config 0x%x\n",
7176 bp->link_params.ext_phy_config);
7177 return;
7178 }
7180 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7181 port*0x18);
7182 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7183 break;
7185 default:
7187 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7188 bp->port.link_config);
7189 return;
7190 }
7191 bp->link_params.phy_addr = bp->port.phy_addr;
7193 /* mask what we support according to speed_cap_mask */
7194 if (!(bp->link_params.speed_cap_mask &
7195 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7196 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7198 if (!(bp->link_params.speed_cap_mask &
7199 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7200 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7202 if (!(bp->link_params.speed_cap_mask &
7203 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7204 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7206 if (!(bp->link_params.speed_cap_mask &
7207 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7208 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7210 if (!(bp->link_params.speed_cap_mask &
7211 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7212 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7213 SUPPORTED_1000baseT_Full);
7215 if (!(bp->link_params.speed_cap_mask &
7216 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7217 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7219 if (!(bp->link_params.speed_cap_mask &
7220 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7221 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7223 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7226 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7227 {
7228 bp->link_params.req_duplex = DUPLEX_FULL;
7230 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7231 case PORT_FEATURE_LINK_SPEED_AUTO:
7232 if (bp->port.supported & SUPPORTED_Autoneg) {
7233 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7234 bp->port.advertising = bp->port.supported;
7235 } else {
7236 u32 ext_phy_type =
7237 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7239 if ((ext_phy_type ==
7240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7242 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7243 /* force 10G, no AN */
7244 bp->link_params.req_line_speed = SPEED_10000;
7245 bp->port.advertising =
7246 (ADVERTISED_10000baseT_Full |
7250 BNX2X_ERR("NVRAM config error. "
7251 "Invalid link_config 0x%x"
7252 " Autoneg not supported\n",
7253 bp->port.link_config);
7254 return;
7255 }
7256 break;
7258 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7259 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7260 bp->link_params.req_line_speed = SPEED_10;
7261 bp->port.advertising = (ADVERTISED_10baseT_Full |
7262 ADVERTISED_TP);
7263 } else {
7264 BNX2X_ERR("NVRAM config error. "
7265 "Invalid link_config 0x%x"
7266 " speed_cap_mask 0x%x\n",
7267 bp->port.link_config,
7268 bp->link_params.speed_cap_mask);
7269 return;
7270 }
7271 break;
7273 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7274 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7275 bp->link_params.req_line_speed = SPEED_10;
7276 bp->link_params.req_duplex = DUPLEX_HALF;
7277 bp->port.advertising = (ADVERTISED_10baseT_Half |
7278 ADVERTISED_TP);
7279 } else {
7280 BNX2X_ERR("NVRAM config error. "
7281 "Invalid link_config 0x%x"
7282 " speed_cap_mask 0x%x\n",
7283 bp->port.link_config,
7284 bp->link_params.speed_cap_mask);
7285 return;
7286 }
7287 break;
7289 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7290 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7291 bp->link_params.req_line_speed = SPEED_100;
7292 bp->port.advertising = (ADVERTISED_100baseT_Full |
7293 ADVERTISED_TP);
7294 } else {
7295 BNX2X_ERR("NVRAM config error. "
7296 "Invalid link_config 0x%x"
7297 " speed_cap_mask 0x%x\n",
7298 bp->port.link_config,
7299 bp->link_params.speed_cap_mask);
7300 return;
7301 }
7302 break;
7304 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7305 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7306 bp->link_params.req_line_speed = SPEED_100;
7307 bp->link_params.req_duplex = DUPLEX_HALF;
7308 bp->port.advertising = (ADVERTISED_100baseT_Half |
7309 ADVERTISED_TP);
7310 } else {
7311 BNX2X_ERR("NVRAM config error. "
7312 "Invalid link_config 0x%x"
7313 " speed_cap_mask 0x%x\n",
7314 bp->port.link_config,
7315 bp->link_params.speed_cap_mask);
7316 return;
7317 }
7318 break;
7320 case PORT_FEATURE_LINK_SPEED_1G:
7321 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7322 bp->link_params.req_line_speed = SPEED_1000;
7323 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7324 ADVERTISED_TP);
7325 } else {
7326 BNX2X_ERR("NVRAM config error. "
7327 "Invalid link_config 0x%x"
7328 " speed_cap_mask 0x%x\n",
7329 bp->port.link_config,
7330 bp->link_params.speed_cap_mask);
7331 return;
7332 }
7333 break;
7335 case PORT_FEATURE_LINK_SPEED_2_5G:
7336 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7337 bp->link_params.req_line_speed = SPEED_2500;
7338 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7339 ADVERTISED_TP);
7340 } else {
7341 BNX2X_ERR("NVRAM config error. "
7342 "Invalid link_config 0x%x"
7343 " speed_cap_mask 0x%x\n",
7344 bp->port.link_config,
7345 bp->link_params.speed_cap_mask);
7346 return;
7347 }
7348 break;
7350 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7351 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7352 case PORT_FEATURE_LINK_SPEED_10G_KR:
7353 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7354 bp->link_params.req_line_speed = SPEED_10000;
7355 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7356 ADVERTISED_FIBRE);
7357 } else {
7358 BNX2X_ERR("NVRAM config error. "
7359 "Invalid link_config 0x%x"
7360 " speed_cap_mask 0x%x\n",
7361 bp->port.link_config,
7362 bp->link_params.speed_cap_mask);
7368 BNX2X_ERR("NVRAM config error. "
7369 "BAD link speed link_config 0x%x\n",
7370 bp->port.link_config);
7371 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7372 bp->port.advertising = bp->port.supported;
7373 break;
7374 }
7376 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7377 PORT_FEATURE_FLOW_CONTROL_MASK);
7378 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7379 !(bp->port.supported & SUPPORTED_Autoneg))
7380 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7382 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7383 " advertising 0x%x\n",
7384 bp->link_params.req_line_speed,
7385 bp->link_params.req_duplex,
7386 bp->link_params.req_flow_ctrl, bp->port.advertising);
7389 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7390 {
7391 int port = BP_PORT(bp);
7392 u32 val, val2;
7394 bp->link_params.bp = bp;
7395 bp->link_params.port = port;
7397 bp->link_params.serdes_config =
7398 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7399 bp->link_params.lane_config =
7400 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7401 bp->link_params.ext_phy_config =
7403 dev_info.port_hw_config[port].external_phy_config);
7404 bp->link_params.speed_cap_mask =
7406 dev_info.port_hw_config[port].speed_capability_mask);
7408 bp->port.link_config =
7409 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7411 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7412 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7413 " link_config 0x%08x\n",
7414 bp->link_params.serdes_config,
7415 bp->link_params.lane_config,
7416 bp->link_params.ext_phy_config,
7417 bp->link_params.speed_cap_mask, bp->port.link_config);
7419 bp->link_params.switch_cfg = (bp->port.link_config &
7420 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7421 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7423 bnx2x_link_settings_requested(bp);
7425 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7426 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7427 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7428 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7429 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7430 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7431 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7432 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7433 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7434 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7435 }
7437 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7438 {
7439 int func = BP_FUNC(bp);
7440 u32 val, val2;
7441 int rc = 0;
7443 bnx2x_get_common_hwinfo(bp);
7447 if (CHIP_IS_E1H(bp)) {
7448 bp->mf_config =
7449 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7451 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7452 FUNC_MF_CFG_E1HOV_TAG_MASK);
7453 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7454 bp->e1hov = val;
7455 bp->e1hmf = 1;
7457 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7458 "(0x%04x)\n",
7459 func, bp->e1hov, bp->e1hov);
7460 } else {
7461 BNX2X_DEV_INFO("Single function mode\n");
7462 if (BP_E1HVN(bp)) {
7463 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7464 " aborting\n", func);
7465 rc = -EPERM;
7466 }
7467 }
7468 }
7470 if (!BP_NOMCP(bp)) {
7471 bnx2x_get_port_hwinfo(bp);
7473 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7474 DRV_MSG_SEQ_NUMBER_MASK);
7475 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7476 }
7478 if (IS_E1HMF(bp)) {
7479 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7480 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7481 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7482 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7483 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7484 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7485 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7486 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7487 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7488 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7489 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7490 ETH_ALEN);
7491 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7492 ETH_ALEN);
7493 }
7495 return rc;
7496 }
7499 /* only supposed to happen on emulation/FPGA */
7500 BNX2X_ERR("warning random MAC workaround active\n");
7501 random_ether_addr(bp->dev->dev_addr);
7502 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7504 return rc;
7505 }
7508 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7509 {
7510 int func = BP_FUNC(bp);
7511 int rc;
7513 /* Disable interrupt handling until HW is initialized */
7514 atomic_set(&bp->intr_sem, 1);
7516 mutex_init(&bp->port.phy_mutex);
7518 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7519 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7521 rc = bnx2x_get_hwinfo(bp);
7523 /* need to reset chip if undi was active */
7524 if (!BP_NOMCP(bp))
7525 bnx2x_undi_unload(bp);
7527 if (CHIP_REV_IS_FPGA(bp))
7528 printk(KERN_ERR PFX "FPGA detected\n");
7530 if (BP_NOMCP(bp) && (func == 0))
7532 "MCP disabled, must load devices in order!\n");
7535 if (disable_tpa) {
7536 bp->flags &= ~TPA_ENABLE_FLAG;
7537 bp->dev->features &= ~NETIF_F_LRO;
7538 } else {
7539 bp->flags |= TPA_ENABLE_FLAG;
7540 bp->dev->features |= NETIF_F_LRO;
7541 }
7544 bp->tx_ring_size = MAX_TX_AVAIL;
7545 bp->rx_ring_size = MAX_RX_AVAIL;
7553 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7554 bp->current_interval = (poll ? poll : bp->timer_interval);
7556 init_timer(&bp->timer);
7557 bp->timer.expires = jiffies + bp->current_interval;
7558 bp->timer.data = (unsigned long) bp;
7559 bp->timer.function = bnx2x_timer;
7561 return rc;
7562 }
7564 /*
7565 * ethtool service functions
7566 */
7568 /* All ethtool functions called with rtnl_lock */
7570 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7571 {
7572 struct bnx2x *bp = netdev_priv(dev);
7574 cmd->supported = bp->port.supported;
7575 cmd->advertising = bp->port.advertising;
7577 if (netif_carrier_ok(dev)) {
7578 cmd->speed = bp->link_vars.line_speed;
7579 cmd->duplex = bp->link_vars.duplex;
7580 } else {
7581 cmd->speed = bp->link_params.req_line_speed;
7582 cmd->duplex = bp->link_params.req_duplex;
7583 }
7585 if (IS_E1HMF(bp)) {
7586 u16 vn_max_rate;
7587 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7588 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7589 if (vn_max_rate < cmd->speed)
7590 cmd->speed = vn_max_rate;
7591 }
7593 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7594 u32 ext_phy_type =
7595 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7597 switch (ext_phy_type) {
7598 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7599 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7600 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7602 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7603 cmd->port = PORT_FIBRE;
7604 break;
7606 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7607 cmd->port = PORT_TP;
7608 break;
7610 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7611 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7612 bp->link_params.ext_phy_config);
7613 break;
7615 default:
7616 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7617 bp->link_params.ext_phy_config);
7618 break;
7619 }
7620 } else
7621 cmd->port = PORT_TP;
7623 cmd->phy_address = bp->port.phy_addr;
7624 cmd->transceiver = XCVR_INTERNAL;
7626 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7627 cmd->autoneg = AUTONEG_ENABLE;
7628 else
7629 cmd->autoneg = AUTONEG_DISABLE;
7634 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7635 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7636 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7637 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7638 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7639 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7640 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7642 return 0;
7643 }
7645 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7647 struct bnx2x *bp = netdev_priv(dev);
7648 u32 advertising;
7650 if (IS_E1HMF(bp))
7651 return 0;
7653 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7654 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7655 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7656 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7657 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7658 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7659 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7661 if (cmd->autoneg == AUTONEG_ENABLE) {
7662 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7663 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7667 /* advertise the requested speed and duplex if supported */
7668 cmd->advertising &= bp->port.supported;
7670 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7671 bp->link_params.req_duplex = DUPLEX_FULL;
7672 bp->port.advertising |= (ADVERTISED_Autoneg |
7673 cmd->advertising);
7675 } else { /* forced speed */
7676 /* advertise the requested speed and duplex if supported */
7677 switch (cmd->speed) {
7678 case SPEED_10:
7679 if (cmd->duplex == DUPLEX_FULL) {
7680 if (!(bp->port.supported &
7681 SUPPORTED_10baseT_Full)) {
7682 DP(NETIF_MSG_LINK,
7683 "10M full not supported\n");
7684 return -EINVAL;
7685 }
7687 advertising = (ADVERTISED_10baseT_Full |
7688 ADVERTISED_TP);
7689 } else {
7690 if (!(bp->port.supported &
7691 SUPPORTED_10baseT_Half)) {
7692 DP(NETIF_MSG_LINK,
7693 "10M half not supported\n");
7694 return -EINVAL;
7695 }
7697 advertising = (ADVERTISED_10baseT_Half |
7698 ADVERTISED_TP);
7699 }
7700 break;
7702 case SPEED_100:
7703 if (cmd->duplex == DUPLEX_FULL) {
7704 if (!(bp->port.supported &
7705 SUPPORTED_100baseT_Full)) {
7706 DP(NETIF_MSG_LINK,
7707 "100M full not supported\n");
7708 return -EINVAL;
7709 }
7711 advertising = (ADVERTISED_100baseT_Full |
7712 ADVERTISED_TP);
7713 } else {
7714 if (!(bp->port.supported &
7715 SUPPORTED_100baseT_Half)) {
7716 DP(NETIF_MSG_LINK,
7717 "100M half not supported\n");
7718 return -EINVAL;
7719 }
7721 advertising = (ADVERTISED_100baseT_Half |
7722 ADVERTISED_TP);
7723 }
7724 break;
7726 case SPEED_1000:
7727 if (cmd->duplex != DUPLEX_FULL) {
7728 DP(NETIF_MSG_LINK, "1G half not supported\n");
7732 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7733 DP(NETIF_MSG_LINK, "1G full not supported\n");
7737 advertising = (ADVERTISED_1000baseT_Full |
7738 ADVERTISED_TP);
7739 break;
7741 case SPEED_2500:
7742 if (cmd->duplex != DUPLEX_FULL) {
7744 "2.5G half not supported\n");
7748 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7750 "2.5G full not supported\n");
7754 advertising = (ADVERTISED_2500baseX_Full |
7755 ADVERTISED_TP);
7756 break;
7758 case SPEED_10000:
7759 if (cmd->duplex != DUPLEX_FULL) {
7760 DP(NETIF_MSG_LINK, "10G half not supported\n");
7764 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7765 DP(NETIF_MSG_LINK, "10G full not supported\n");
7769 advertising = (ADVERTISED_10000baseT_Full |
7770 ADVERTISED_FIBRE);
7771 break;
7773 default:
7774 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7775 return -EINVAL;
7776 }
7778 bp->link_params.req_line_speed = cmd->speed;
7779 bp->link_params.req_duplex = cmd->duplex;
7780 bp->port.advertising = advertising;
7783 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7784 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7785 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7786 bp->port.advertising);
7788 if (netif_running(dev)) {
7789 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7790 bnx2x_link_set(bp);
7791 }
7793 return 0;
7794 }
7796 #define PHY_FW_VER_LEN 10
7798 static void bnx2x_get_drvinfo(struct net_device *dev,
7799 struct ethtool_drvinfo *info)
7801 struct bnx2x *bp = netdev_priv(dev);
7802 u8 phy_fw_ver[PHY_FW_VER_LEN];
7804 strcpy(info->driver, DRV_MODULE_NAME);
7805 strcpy(info->version, DRV_MODULE_VERSION);
7807 phy_fw_ver[0] = '\0';
7808 if (bp->port.pmf) {
7809 bnx2x_acquire_phy_lock(bp);
7810 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7811 (bp->state != BNX2X_STATE_CLOSED),
7812 phy_fw_ver, PHY_FW_VER_LEN);
7813 bnx2x_release_phy_lock(bp);
7814 }
7816 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7817 (bp->common.bc_ver & 0xff0000) >> 16,
7818 (bp->common.bc_ver & 0xff00) >> 8,
7819 (bp->common.bc_ver & 0xff),
7820 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7821 strcpy(info->bus_info, pci_name(bp->pdev));
7822 info->n_stats = BNX2X_NUM_STATS;
7823 info->testinfo_len = BNX2X_NUM_TESTS;
7824 info->eedump_len = bp->common.flash_size;
7825 info->regdump_len = 0;
7826 }
7828 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7830 struct bnx2x *bp = netdev_priv(dev);
7832 if (bp->flags & NO_WOL_FLAG) {
7833 wol->supported = 0;
7834 wol->wolopts = 0;
7835 } else {
7836 wol->supported = WAKE_MAGIC;
7837 if (bp->wol)
7838 wol->wolopts = WAKE_MAGIC;
7839 else
7840 wol->wolopts = 0;
7841 }
7842 memset(&wol->sopass, 0, sizeof(wol->sopass));
7843 }
7845 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7847 struct bnx2x *bp = netdev_priv(dev);
7849 if (wol->wolopts & ~WAKE_MAGIC)
7850 return -EINVAL;
7852 if (wol->wolopts & WAKE_MAGIC) {
7853 if (bp->flags & NO_WOL_FLAG)
7854 return -EINVAL;
7856 bp->wol = 1;
7857 } else
7858 bp->wol = 0;
7860 return 0;
7861 }
7863 static u32 bnx2x_get_msglevel(struct net_device *dev)
7865 struct bnx2x *bp = netdev_priv(dev);
7867 return bp->msglevel;
7868 }
7870 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7872 struct bnx2x *bp = netdev_priv(dev);
7874 if (capable(CAP_NET_ADMIN))
7875 bp->msglevel = level;
7876 }
7878 static int bnx2x_nway_reset(struct net_device *dev)
7880 struct bnx2x *bp = netdev_priv(dev);
7882 if (!bp->port.pmf)
7883 return 0;
7885 if (netif_running(dev)) {
7886 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7887 bnx2x_link_set(bp);
7888 }
7890 return 0;
7891 }
7893 static int bnx2x_get_eeprom_len(struct net_device *dev)
7895 struct bnx2x *bp = netdev_priv(dev);
7897 return bp->common.flash_size;
7898 }
7900 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7901 {
7902 int port = BP_PORT(bp);
7903 int count, i;
7904 u32 val;
7906 /* adjust timeout for emulation/FPGA */
7907 count = NVRAM_TIMEOUT_COUNT;
7908 if (CHIP_REV_IS_SLOW(bp))
7911 /* request access to nvram interface */
7912 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7913 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
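/* the NVRAM is shared by both ports and the MCP; the SW_ARB register
 * implements a set/clear request-grant handshake, so poll below until
 * our per-port grant bit appears */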
7915 for (i = 0; i < count*10; i++) {
7916 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7917 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7918 break;
7920 udelay(5);
7921 }
7923 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7924 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7931 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7932 {
7933 int port = BP_PORT(bp);
7934 int count, i;
7935 u32 val;
7937 /* adjust timeout for emulation/FPGA */
7938 count = NVRAM_TIMEOUT_COUNT;
7939 if (CHIP_REV_IS_SLOW(bp))
7942 /* relinquish nvram interface */
7943 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7944 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7946 for (i = 0; i < count*10; i++) {
7947 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7948 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7949 break;
7951 udelay(5);
7952 }
7954 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7955 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7962 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7963 {
7964 u32 val;
7966 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7968 /* enable both bits, even on read */
7969 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7970 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7971 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7972 }
7974 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7975 {
7976 u32 val;
7978 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7980 /* disable both bits, even after read */
7981 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7982 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7983 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7984 }
7986 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7987 u32 cmd_flags)
7988 {
7989 int count, i, rc;
7990 u32 val;
7992 /* build the command word */
7993 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7995 /* need to clear DONE bit separately */
7996 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7998 /* address of the NVRAM to read from */
7999 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8000 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8002 /* issue a read command */
8003 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8005 /* adjust timeout for emulation/FPGA */
8006 count = NVRAM_TIMEOUT_COUNT;
8007 if (CHIP_REV_IS_SLOW(bp))
8010 /* wait for completion */
8011 rc = -EBUSY;
8013 for (i = 0; i < count; i++) {
8014 udelay(5);
8015 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8017 if (val & MCPR_NVM_COMMAND_DONE) {
8018 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8019 /* we read nvram data in cpu order
8020 * but ethtool sees it as an array of bytes
8021 * converting to big-endian will do the work */
8022 val = cpu_to_be32(val);
8023 *ret_val = val;
8024 rc = 0;
8025 break;
8026 }
8027 }
8029 return rc;
8030 }
8032 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8033 int buf_size)
8034 {
8035 int rc;
8036 u32 cmd_flags;
8037 u32 val;
8039 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8041 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8046 if (offset + buf_size > bp->common.flash_size) {
8047 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8048 " buf_size (0x%x) > flash_size (0x%x)\n",
8049 offset, buf_size, bp->common.flash_size);
8050 return -EINVAL;
8051 }
8053 /* request access to nvram interface */
8054 rc = bnx2x_acquire_nvram_lock(bp);
8055 if (rc)
8056 return rc;
8058 /* enable access to nvram interface */
8059 bnx2x_enable_nvram_access(bp);
8061 /* read the first word(s) */
8062 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8063 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8064 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8065 memcpy(ret_buf, &val, 4);
8067 /* advance to the next dword */
8068 offset += sizeof(u32);
8069 ret_buf += sizeof(u32);
8070 buf_size -= sizeof(u32);
8071 cmd_flags = 0;
8072 }
8074 if (rc == 0) {
8075 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8076 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8077 memcpy(ret_buf, &val, 4);
8078 }
8080 /* disable access to nvram interface */
8081 bnx2x_disable_nvram_access(bp);
8082 bnx2x_release_nvram_lock(bp);
8084 return rc;
8085 }
8087 static int bnx2x_get_eeprom(struct net_device *dev,
8088 struct ethtool_eeprom *eeprom, u8 *eebuf)
8089 {
8090 struct bnx2x *bp = netdev_priv(dev);
8091 int rc;
8093 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8094 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8095 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8096 eeprom->len, eeprom->len);
8098 /* parameters already validated in ethtool_get_eeprom */
8100 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8102 return rc;
8103 }
8105 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8106 u32 cmd_flags)
8107 {
8108 int count, i, rc;
8110 /* build the command word */
8111 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8113 /* need to clear DONE bit separately */
8114 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8116 /* write the data */
8117 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8119 /* address of the NVRAM to write to */
8120 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8121 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8123 /* issue the write command */
8124 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8126 /* adjust timeout for emulation/FPGA */
8127 count = NVRAM_TIMEOUT_COUNT;
8128 if (CHIP_REV_IS_SLOW(bp))
8131 /* wait for completion */
8132 rc = -EBUSY;
8133 for (i = 0; i < count; i++) {
8134 udelay(5);
8135 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8136 if (val & MCPR_NVM_COMMAND_DONE) {
8137 rc = 0;
8138 break;
8139 }
8140 }
8142 return rc;
8143 }
8145 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
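/* A one-byte write is a read-modify-write of the aligned dword:
 * BYTE_OFFSET() gives the byte's bit position, the old byte is masked
 * out and the new one OR-ed in before writing the dword back.
 */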
8147 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8148 int buf_size)
8149 {
8150 int rc;
8151 u32 cmd_flags;
8152 u32 align_offset;
8153 u32 val;
8155 if (offset + buf_size > bp->common.flash_size) {
8156 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8157 " buf_size (0x%x) > flash_size (0x%x)\n",
8158 offset, buf_size, bp->common.flash_size);
8159 return -EINVAL;
8160 }
8162 /* request access to nvram interface */
8163 rc = bnx2x_acquire_nvram_lock(bp);
8164 if (rc)
8165 return rc;
8167 /* enable access to nvram interface */
8168 bnx2x_enable_nvram_access(bp);
8170 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8171 align_offset = (offset & ~0x03);
8172 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8174 if (rc == 0) {
8175 val &= ~(0xff << BYTE_OFFSET(offset));
8176 val |= (*data_buf << BYTE_OFFSET(offset));
8178 /* nvram data is returned as an array of bytes
8179 * convert it back to cpu order */
8180 val = be32_to_cpu(val);
8182 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8183 cmd_flags);
8184 }
8186 /* disable access to nvram interface */
8187 bnx2x_disable_nvram_access(bp);
8188 bnx2x_release_nvram_lock(bp);
8190 return rc;
8191 }
8193 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8194 int buf_size)
8195 {
8196 int rc;
8197 u32 cmd_flags;
8198 u32 val;
8199 u32 written_so_far;
8201 if (buf_size == 1) /* ethtool */
8202 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8204 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8206 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8211 if (offset + buf_size > bp->common.flash_size) {
8212 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8213 " buf_size (0x%x) > flash_size (0x%x)\n",
8214 offset, buf_size, bp->common.flash_size);
8215 return -EINVAL;
8216 }
8218 /* request access to nvram interface */
8219 rc = bnx2x_acquire_nvram_lock(bp);
8220 if (rc)
8221 return rc;
8223 /* enable access to nvram interface */
8224 bnx2x_enable_nvram_access(bp);
8226 written_so_far = 0;
8227 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8228 while ((written_so_far < buf_size) && (rc == 0)) {
8229 if (written_so_far == (buf_size - sizeof(u32)))
8230 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8231 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8232 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8233 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8234 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
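/* the flash autoincrements only within a page, so a dword that ends a
 * page (or the buffer) is flagged LAST and the next one FIRST again */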
8236 memcpy(&val, data_buf, 4);
8238 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8240 /* advance to the next dword */
8241 offset += sizeof(u32);
8242 data_buf += sizeof(u32);
8243 written_so_far += sizeof(u32);
8244 cmd_flags = 0;
8245 }
8247 /* disable access to nvram interface */
8248 bnx2x_disable_nvram_access(bp);
8249 bnx2x_release_nvram_lock(bp);
8251 return rc;
8252 }
8254 static int bnx2x_set_eeprom(struct net_device *dev,
8255 struct ethtool_eeprom *eeprom, u8 *eebuf)
8257 struct bnx2x *bp = netdev_priv(dev);
8258 int rc;
8260 if (!netif_running(dev))
8261 return -EAGAIN;
8263 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8264 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8265 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8266 eeprom->len, eeprom->len);
8268 /* parameters already validated in ethtool_set_eeprom */
8270 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8271 if (eeprom->magic == 0x00504859)
8272 if (bp->port.pmf) {
8274 bnx2x_acquire_phy_lock(bp);
8275 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8276 bp->link_params.ext_phy_config,
8277 (bp->state != BNX2X_STATE_CLOSED),
8278 eebuf, eeprom->len);
8279 if ((bp->state == BNX2X_STATE_OPEN) ||
8280 (bp->state == BNX2X_STATE_DISABLED)) {
8281 rc |= bnx2x_link_reset(&bp->link_params,
8282 &bp->link_vars);
8283 rc |= bnx2x_phy_init(&bp->link_params,
8284 &bp->link_vars);
8285 }
8286 bnx2x_release_phy_lock(bp);
8288 } else /* Only the PMF can access the PHY */
8289 return -EINVAL;
8290 else
8291 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8293 return rc;
8294 }
8296 static int bnx2x_get_coalesce(struct net_device *dev,
8297 struct ethtool_coalesce *coal)
8299 struct bnx2x *bp = netdev_priv(dev);
8301 memset(coal, 0, sizeof(struct ethtool_coalesce));
8303 coal->rx_coalesce_usecs = bp->rx_ticks;
8304 coal->tx_coalesce_usecs = bp->tx_ticks;
8306 return 0;
8307 }
8309 static int bnx2x_set_coalesce(struct net_device *dev,
8310 struct ethtool_coalesce *coal)
8312 struct bnx2x *bp = netdev_priv(dev);
8314 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8315 if (bp->rx_ticks > 3000)
8316 bp->rx_ticks = 3000;
8318 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8319 if (bp->tx_ticks > 0x3000)
8320 bp->tx_ticks = 0x3000;
8322 if (netif_running(dev))
8323 bnx2x_update_coalesce(bp);
8325 return 0;
8326 }
8328 static void bnx2x_get_ringparam(struct net_device *dev,
8329 struct ethtool_ringparam *ering)
8331 struct bnx2x *bp = netdev_priv(dev);
8333 ering->rx_max_pending = MAX_RX_AVAIL;
8334 ering->rx_mini_max_pending = 0;
8335 ering->rx_jumbo_max_pending = 0;
8337 ering->rx_pending = bp->rx_ring_size;
8338 ering->rx_mini_pending = 0;
8339 ering->rx_jumbo_pending = 0;
8341 ering->tx_max_pending = MAX_TX_AVAIL;
8342 ering->tx_pending = bp->tx_ring_size;
8343 }
8345 static int bnx2x_set_ringparam(struct net_device *dev,
8346 struct ethtool_ringparam *ering)
8348 struct bnx2x *bp = netdev_priv(dev);
8349 int rc = 0;
8351 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8352 (ering->tx_pending > MAX_TX_AVAIL) ||
8353 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8354 return -EINVAL;
8356 bp->rx_ring_size = ering->rx_pending;
8357 bp->tx_ring_size = ering->tx_pending;
8359 if (netif_running(dev)) {
8360 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8361 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8362 }
8364 return rc;
8365 }
8367 static void bnx2x_get_pauseparam(struct net_device *dev,
8368 struct ethtool_pauseparam *epause)
8370 struct bnx2x *bp = netdev_priv(dev);
8372 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8373 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8375 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8376 BNX2X_FLOW_CTRL_RX);
8377 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8378 BNX2X_FLOW_CTRL_TX);
8380 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8381 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8382 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8383 }
8385 static int bnx2x_set_pauseparam(struct net_device *dev,
8386 struct ethtool_pauseparam *epause)
8388 struct bnx2x *bp = netdev_priv(dev);
8390 if (IS_E1HMF(bp))
8391 return 0;
8393 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8394 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8395 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8397 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8399 if (epause->rx_pause)
8400 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8402 if (epause->tx_pause)
8403 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8405 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8406 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8408 if (epause->autoneg) {
8409 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8410 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8414 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8415 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8416 }
8419 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8421 if (netif_running(dev)) {
8422 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8423 bnx2x_link_set(bp);
8424 }
8426 return 0;
8427 }
8429 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8431 struct bnx2x *bp = netdev_priv(dev);
8432 int changed = 0;
8433 int rc = 0;
8435 /* TPA requires Rx CSUM offloading */
8436 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8437 if (!(dev->features & NETIF_F_LRO)) {
8438 dev->features |= NETIF_F_LRO;
8439 bp->flags |= TPA_ENABLE_FLAG;
8440 changed = 1;
8441 }
8443 } else if (dev->features & NETIF_F_LRO) {
8444 dev->features &= ~NETIF_F_LRO;
8445 bp->flags &= ~TPA_ENABLE_FLAG;
8446 changed = 1;
8447 }
8449 if (changed && netif_running(dev)) {
8450 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8451 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8452 }
8454 return rc;
8455 }
8457 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8459 struct bnx2x *bp = netdev_priv(dev);
8461 return bp->rx_csum;
8462 }
8464 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8466 struct bnx2x *bp = netdev_priv(dev);
8467 int rc = 0;
8469 bp->rx_csum = data;
8471 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8472 TPA'ed packets will be discarded due to wrong TCP CSUM */
8473 if (!data) {
8474 u32 flags = ethtool_op_get_flags(dev);
8476 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8477 }
8479 return rc;
8480 }
8482 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8483 {
8484 if (data) {
8485 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8486 dev->features |= NETIF_F_TSO6;
8487 } else {
8488 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8489 dev->features &= ~NETIF_F_TSO6;
8490 }
8492 return 0;
8493 }
8495 static const struct {
8496 char string[ETH_GSTRING_LEN];
8497 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8498 { "register_test (offline)" },
8499 { "memory_test (offline)" },
8500 { "loopback_test (offline)" },
8501 { "nvram_test (online)" },
8502 { "interrupt_test (online)" },
8503 { "link_test (online)" },
8504 { "idle check (online)" },
8505 { "MC errors (online)" }
8508 static int bnx2x_self_test_count(struct net_device *dev)
8509 {
8510 return BNX2X_NUM_TESTS;
8511 }
8513 static int bnx2x_test_registers(struct bnx2x *bp)
8514 {
8515 int idx, i, rc = -ENODEV;
8516 u32 wr_val = 0;
8517 int port = BP_PORT(bp);
8518 static const struct {
8519 u32 offset0;
8520 u32 offset1;
8521 u32 mask;
8522 } reg_tbl[] = {
8523 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8524 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8525 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8526 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8527 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8528 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8529 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8530 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8531 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8532 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8533 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8534 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8535 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8536 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8537 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8538 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8539 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8540 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8541 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8542 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8543 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8544 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8545 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8546 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8547 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8548 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8549 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8550 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8551 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8552 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8553 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8554 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8555 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8556 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8557 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8558 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8559 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8560 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8562 { 0xffffffff, 0, 0x00000000 }
8563 };
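/* The test is table driven: offset1 is the stride between the two
 * ports' copies of a register and mask marks the writable bits; each
 * register is written with a pattern, read back through the mask and
 * then restored.
 */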
8565 if (!netif_running(bp->dev))
8566 return rc;
8568 /* Repeat the test twice:
8569 First by writing 0x00000000, second by writing 0xffffffff */
8570 for (idx = 0; idx < 2; idx++) {
8572 switch (idx) {
8573 case 0:
8574 wr_val = 0x0;
8575 break;
8576 case 1:
8577 wr_val = 0xffffffff;
8578 break;
8579 }
8581 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8582 u32 offset, mask, save_val, val;
8584 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8585 mask = reg_tbl[i].mask;
8587 save_val = REG_RD(bp, offset);
8589 REG_WR(bp, offset, wr_val);
8590 val = REG_RD(bp, offset);
8592 /* Restore the original register's value */
8593 REG_WR(bp, offset, save_val);
8595 /* verify that value is as expected value */
8596 if ((val & mask) != (wr_val & mask))
8597 goto test_reg_exit;
8598 }
8599 }
8601 rc = 0;
8603 test_reg_exit:
8604 return rc;
8605 }
8607 static int bnx2x_test_memory(struct bnx2x *bp)
8608 {
8609 int i, j, rc = -ENODEV;
8610 u32 val;
8611 static const struct {
8612 u32 offset;
8613 int size;
8614 } mem_tbl[] = {
8615 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8616 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8617 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8618 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8619 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8620 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8621 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8623 { 0xffffffff, 0 }
8624 };
8625 static const struct {
8626 char *name;
8627 u32 offset;
8628 u32 e1_mask;
8629 u32 e1h_mask;
8630 } prty_tbl[] = {
8631 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8632 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8633 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8634 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8635 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8636 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8638 { NULL, 0xffffffff, 0, 0 }
8641 if (!netif_running(bp->dev))
8644 /* Go through all the memories */
8645 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8646 for (j = 0; j < mem_tbl[i].size; j++)
8647 REG_RD(bp, mem_tbl[i].offset + j*4);
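/* reading every word is enough: a corrupted location raises a parity
 * error which then shows up in the PRTY_STS registers checked below */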
8649 /* Check the parity status */
8650 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8651 val = REG_RD(bp, prty_tbl[i].offset);
8652 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8653 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8655 "%s is 0x%x\n", prty_tbl[i].name, val);
8666 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8667 {
8668 int cnt = 1000;
8670 if (link_up)
8671 while (bnx2x_link_test(bp) && cnt--)
8672 msleep(10);
8673 }
8675 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8676 {
8677 unsigned int pkt_size, num_pkts, i;
8678 struct sk_buff *skb;
8679 unsigned char *packet;
8680 struct bnx2x_fastpath *fp = &bp->fp[0];
8681 u16 tx_start_idx, tx_idx;
8682 u16 rx_start_idx, rx_idx;
8683 u16 pkt_prod;
8684 struct sw_tx_bd *tx_buf;
8685 struct eth_tx_bd *tx_bd;
8686 dma_addr_t mapping;
8687 union eth_rx_cqe *cqe;
8688 u8 cqe_fp_flags;
8689 struct sw_rx_bd *rx_buf;
8690 u16 len;
8691 int rc = -ENODEV;
8693 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8694 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8695 bnx2x_acquire_phy_lock(bp);
8696 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8697 bnx2x_release_phy_lock(bp);
8699 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8700 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8701 bnx2x_acquire_phy_lock(bp);
8702 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8703 bnx2x_release_phy_lock(bp);
8704 /* wait until link state is restored */
8705 bnx2x_wait_for_link(bp, link_up);
8707 } else
8708 return -EINVAL;
8710 pkt_size = 1514;
8711 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8712 if (!skb) {
8713 rc = -ENOMEM;
8714 goto test_loopback_exit;
8715 }
8716 packet = skb_put(skb, pkt_size);
8717 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8718 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8719 for (i = ETH_HLEN; i < pkt_size; i++)
8720 packet[i] = (unsigned char) (i & 0xff);
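/* loopback frame: own MAC as destination, zeroed source and an index
 * pattern payload that the Rx side verifies byte by byte */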
8721 num_pkts = 0;
8723 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8724 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8726 pkt_prod = fp->tx_pkt_prod++;
8727 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8728 tx_buf->first_bd = fp->tx_bd_prod;
8729 tx_buf->skb = skb;
8731 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8732 mapping = pci_map_single(bp->pdev, skb->data,
8733 skb_headlen(skb), PCI_DMA_TODEVICE);
8734 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8735 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8736 tx_bd->nbd = cpu_to_le16(1);
8737 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8738 tx_bd->vlan = cpu_to_le16(pkt_prod);
8739 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8740 ETH_TX_BD_FLAGS_END_BD);
8741 tx_bd->general_data = ((UNICAST_ADDRESS <<
8742 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8744 wmb();
8746 fp->hw_tx_prods->bds_prod =
8747 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8748 mb(); /* FW restriction: must not reorder writing nbd and packets */
8749 fp->hw_tx_prods->packets_prod =
8750 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8751 DOORBELL(bp, FP_IDX(fp), 0);
8757 bp->dev->trans_start = jiffies;
8761 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8762 if (tx_idx != tx_start_idx + num_pkts)
8763 goto test_loopback_exit;
8765 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8766 if (rx_idx != rx_start_idx + num_pkts)
8767 goto test_loopback_exit;
8769 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8770 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
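/* note: "FALGS" is the spelling used by the firmware HSI header
 * (bnx2x_hsi.h), kept as-is to match that interface
 */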
8771 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8772 goto test_loopback_rx_exit;
8774 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8775 if (len != pkt_size)
8776 goto test_loopback_rx_exit;
8778 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8780 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8781 for (i = ETH_HLEN; i < pkt_size; i++)
8782 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8783 goto test_loopback_rx_exit;
8787 test_loopback_rx_exit:
8789 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8790 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8791 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8792 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8794 /* Update producers */
8795 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8799 bp->link_params.loopback_mode = LOOPBACK_NONE;
8804 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8808 if (!netif_running(bp->dev))
8809 return BNX2X_LOOPBACK_FAILED;
8811 bnx2x_netif_stop(bp, 1);
8813 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8814 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8815 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8818 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8819 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8820 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8823 bnx2x_netif_start(bp);
8828 #define CRC32_RESIDUAL 0xdebb20e3
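/* Every region in nvram_tbl below ends with its own stored CRC-32, so
 * running the little-endian CRC over the whole region (data plus stored
 * CRC) of an intact region always produces the same fixed residual.
 * A single compare against CRC32_RESIDUAL therefore replaces per-region
 * expected checksums.
 */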
8830 static int bnx2x_test_nvram(struct bnx2x *bp)
8832 static const struct {
8836 { 0, 0x14 }, /* bootstrap */
8837 { 0x14, 0xec }, /* dir */
8838 { 0x100, 0x350 }, /* manuf_info */
8839 { 0x450, 0xf0 }, /* feature_info */
8840 { 0x640, 0x64 }, /* upgrade_key_info */
8842 { 0x708, 0x70 }, /* manuf_key_info */
8847 u8 *data = (u8 *)buf;
8851 rc = bnx2x_nvram_read(bp, 0, data, 4);
8853 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8854 goto test_nvram_exit;
8857 magic = be32_to_cpu(buf[0]);
8858 if (magic != 0x669955aa) {
8859 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8861 goto test_nvram_exit;
8864 for (i = 0; nvram_tbl[i].size; i++) {
8866 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8870 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8871 goto test_nvram_exit;
8874 csum = ether_crc_le(nvram_tbl[i].size, data);
8875 if (csum != CRC32_RESIDUAL) {
8877 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8879 goto test_nvram_exit;
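/* Interrupt self-test: post a benign (zero-length) SET_MAC ramrod on
 * the slowpath and wait for its completion event.  If the completion
 * interrupt fires, the event handler clears set_mac_pending inside the
 * poll loop below; otherwise the test fails.
 */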
8887 static int bnx2x_test_intr(struct bnx2x *bp)
8889 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8892 if (!netif_running(bp->dev))
8895 config->hdr.length_6b = 0;
8896 config->hdr.offset = 0;
8897 config->hdr.client_id = BP_CL_ID(bp);
8898 config->hdr.reserved1 = 0;
8900 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8901 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8902 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8904 bp->set_mac_pending++;
8905 for (i = 0; i < 10; i++) {
8906 if (!bp->set_mac_pending)
8908 msleep_interruptible(10);
8917 static void bnx2x_self_test(struct net_device *dev,
8918 struct ethtool_test *etest, u64 *buf)
8920 struct bnx2x *bp = netdev_priv(dev);
8922 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
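/* Each test below reports into its own slot in buf[] and raises
 * ETH_TEST_FL_FAILED in etest->flags on error; the offline tests first
 * reload the chip in diagnostic mode, the online tests run as-is.
 */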
8924 if (!netif_running(dev))
8927 /* offline tests are not supported in MF mode */
8929 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8931 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8934 link_up = bp->link_vars.link_up;
8935 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8936 bnx2x_nic_load(bp, LOAD_DIAG);
8937 /* wait until link state is restored */
8938 bnx2x_wait_for_link(bp, link_up);
8940 if (bnx2x_test_registers(bp) != 0) {
8942 etest->flags |= ETH_TEST_FL_FAILED;
8944 if (bnx2x_test_memory(bp) != 0) {
8946 etest->flags |= ETH_TEST_FL_FAILED;
8948 buf[2] = bnx2x_test_loopback(bp, link_up);
8950 etest->flags |= ETH_TEST_FL_FAILED;
8952 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8953 bnx2x_nic_load(bp, LOAD_NORMAL);
8954 /* wait until link state is restored */
8955 bnx2x_wait_for_link(bp, link_up);
8957 if (bnx2x_test_nvram(bp) != 0) {
8959 etest->flags |= ETH_TEST_FL_FAILED;
8961 if (bnx2x_test_intr(bp) != 0) {
8963 etest->flags |= ETH_TEST_FL_FAILED;
8966 if (bnx2x_link_test(bp) != 0) {
8968 etest->flags |= ETH_TEST_FL_FAILED;
8970 buf[7] = bnx2x_mc_assert(bp);
8972 etest->flags |= ETH_TEST_FL_FAILED;
8974 #ifdef BNX2X_EXTRA_DEBUG
8975 bnx2x_panic_dump(bp);
8979 static const struct {
8983 #define STATS_FLAGS_PORT 1
8984 #define STATS_FLAGS_FUNC 2
8985 u8 string[ETH_GSTRING_LEN];
8986 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8987 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8988 8, STATS_FLAGS_FUNC, "rx_bytes" },
8989 { STATS_OFFSET32(error_bytes_received_hi),
8990 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8991 { STATS_OFFSET32(total_bytes_transmitted_hi),
8992 8, STATS_FLAGS_FUNC, "tx_bytes" },
8993 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8994 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8995 { STATS_OFFSET32(total_unicast_packets_received_hi),
8996 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8997 { STATS_OFFSET32(total_multicast_packets_received_hi),
8998 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8999 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9000 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9001 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9002 8, STATS_FLAGS_FUNC, "tx_packets" },
9003 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9004 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9005 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9006 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9007 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9008 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9009 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9010 8, STATS_FLAGS_PORT, "rx_align_errors" },
9011 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9012 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9013 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9014 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9015 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9016 8, STATS_FLAGS_PORT, "tx_deferred" },
9017 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9018 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9019 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9020 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9021 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9022 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9023 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9024 8, STATS_FLAGS_PORT, "rx_fragments" },
9025 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9026 8, STATS_FLAGS_PORT, "rx_jabbers" },
9027 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9028 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9029 { STATS_OFFSET32(jabber_packets_received),
9030 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9031 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9032 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9033 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9034 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9035 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9036 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9037 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9038 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9039 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9040 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9041 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9042 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9043 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9044 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9045 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9046 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9047 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9048 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9049 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9050 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9051 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9052 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9053 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9054 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9055 { STATS_OFFSET32(mac_filter_discard),
9056 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9057 { STATS_OFFSET32(no_buff_discard),
9058 4, STATS_FLAGS_FUNC, "rx_discards" },
9059 { STATS_OFFSET32(xxoverflow_discard),
9060 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9061 { STATS_OFFSET32(brb_drop_hi),
9062 8, STATS_FLAGS_PORT, "brb_discard" },
9063 { STATS_OFFSET32(brb_truncate_hi),
9064 8, STATS_FLAGS_PORT, "brb_truncate" },
9065 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9066 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9067 { STATS_OFFSET32(rx_skb_alloc_failed),
9068 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9069 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9070 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9073 #define IS_NOT_E1HMF_STAT(bp, i) \
9074 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
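/* In E1H multi-function mode the port is shared between functions, so
 * per-port MAC statistics are not meaningful for a single function and
 * the macro above filters them out.
 */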
9076 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9078 struct bnx2x *bp = netdev_priv(dev);
9081 switch (stringset) {
9083 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9084 if (IS_NOT_E1HMF_STAT(bp, i))
9086 strcpy(buf + j*ETH_GSTRING_LEN,
9087 bnx2x_stats_arr[i].string);
9093 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9098 static int bnx2x_get_stats_count(struct net_device *dev)
9100 struct bnx2x *bp = netdev_priv(dev);
9101 int i, num_stats = 0;
9103 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9104 if (IS_NOT_E1HMF_STAT(bp, i))
9111 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9112 struct ethtool_stats *stats, u64 *buf)
9114 struct bnx2x *bp = netdev_priv(dev);
9115 u32 *hw_stats = (u32 *)&bp->eth_stats;
9118 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9119 if (IS_NOT_E1HMF_STAT(bp, i))
9122 if (bnx2x_stats_arr[i].size == 0) {
9123 /* skip this counter */
9128 if (bnx2x_stats_arr[i].size == 4) {
9129 /* 4-byte counter */
9130 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9134 /* 8-byte counter */
9135 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9136 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9141 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9143 struct bnx2x *bp = netdev_priv(dev);
9144 int port = BP_PORT(bp);
9147 if (!netif_running(dev))
9156 for (i = 0; i < (data * 2); i++) {
9158 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9159 bp->link_params.hw_led_mode,
9160 bp->link_params.chip_id);
9162 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9163 bp->link_params.hw_led_mode,
9164 bp->link_params.chip_id);
9166 msleep_interruptible(500);
9167 if (signal_pending(current))
9171 if (bp->link_vars.link_up)
9172 bnx2x_set_led(bp, port, LED_MODE_OPER,
9173 bp->link_vars.line_speed,
9174 bp->link_params.hw_led_mode,
9175 bp->link_params.chip_id);
9180 static struct ethtool_ops bnx2x_ethtool_ops = {
9181 .get_settings = bnx2x_get_settings,
9182 .set_settings = bnx2x_set_settings,
9183 .get_drvinfo = bnx2x_get_drvinfo,
9184 .get_wol = bnx2x_get_wol,
9185 .set_wol = bnx2x_set_wol,
9186 .get_msglevel = bnx2x_get_msglevel,
9187 .set_msglevel = bnx2x_set_msglevel,
9188 .nway_reset = bnx2x_nway_reset,
9189 .get_link = ethtool_op_get_link,
9190 .get_eeprom_len = bnx2x_get_eeprom_len,
9191 .get_eeprom = bnx2x_get_eeprom,
9192 .set_eeprom = bnx2x_set_eeprom,
9193 .get_coalesce = bnx2x_get_coalesce,
9194 .set_coalesce = bnx2x_set_coalesce,
9195 .get_ringparam = bnx2x_get_ringparam,
9196 .set_ringparam = bnx2x_set_ringparam,
9197 .get_pauseparam = bnx2x_get_pauseparam,
9198 .set_pauseparam = bnx2x_set_pauseparam,
9199 .get_rx_csum = bnx2x_get_rx_csum,
9200 .set_rx_csum = bnx2x_set_rx_csum,
9201 .get_tx_csum = ethtool_op_get_tx_csum,
9202 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9203 .set_flags = bnx2x_set_flags,
9204 .get_flags = ethtool_op_get_flags,
9205 .get_sg = ethtool_op_get_sg,
9206 .set_sg = ethtool_op_set_sg,
9207 .get_tso = ethtool_op_get_tso,
9208 .set_tso = bnx2x_set_tso,
9209 .self_test_count = bnx2x_self_test_count,
9210 .self_test = bnx2x_self_test,
9211 .get_strings = bnx2x_get_strings,
9212 .phys_id = bnx2x_phys_id,
9213 .get_stats_count = bnx2x_get_stats_count,
9214 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9217 /* end of ethtool_ops */
9219 /****************************************************************************
9220 * General service functions
9221 ****************************************************************************/
9223 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9227 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9231 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9232 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9233 PCI_PM_CTRL_PME_STATUS));
9235 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9236 /* delay required during transition out of D3hot */
9241 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9245 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9247 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9250 /* No more memory access after this point until
9251 * device is brought back to D0.
9262 * net_device service functions
9265 static int bnx2x_poll(struct napi_struct *napi, int budget)
9267 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9269 struct bnx2x *bp = fp->bp;
9273 #ifdef BNX2X_STOP_ON_ERROR
9274 if (unlikely(bp->panic))
9278 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9279 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9280 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9282 bnx2x_update_fpsb_idx(fp);
9284 if (BNX2X_HAS_TX_WORK(fp))
9285 bnx2x_tx_int(fp, budget);
9287 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
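/* the last RCQ descriptor of each page is a next-page pointer, not a
 * real completion, so a consumer index landing on it is advanced past
 * it before the comparison
 */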
9288 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9290 if (BNX2X_HAS_RX_WORK(fp))
9291 work_done = bnx2x_rx_int(fp, budget);
9293 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9294 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9295 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9298 /* must not complete if we consumed full budget */
9299 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9301 #ifdef BNX2X_STOP_ON_ERROR
9304 netif_rx_complete(napi);
9306 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9307 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9308 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9309 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9315 /* we split the first BD into headers and data BDs
9316 * to ease the pain of our fellow microcode engineers;
9317 * we use one mapping for both BDs.
9318 * So far this has only been observed to happen
9319 * in Other Operating Systems(TM).
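* A sketch of the resulting ring layout (illustrative only):
*
*   before:  [BD0: hdr+data, nbytes = old_len][PBD]
*   after:   [BD0: hdr, nbytes = hlen][PBD][BD1: data, nbytes = old_len - hlen]
*
* Both BDs point into the same DMA mapping; BD1 starts at mapping + hlen.
*/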
9321 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9322 struct bnx2x_fastpath *fp,
9323 struct eth_tx_bd **tx_bd, u16 hlen,
9324 u16 bd_prod, int nbd)
9326 struct eth_tx_bd *h_tx_bd = *tx_bd;
9327 struct eth_tx_bd *d_tx_bd;
9329 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9331 /* first, fix the first (headers) BD */
9332 h_tx_bd->nbd = cpu_to_le16(nbd);
9333 h_tx_bd->nbytes = cpu_to_le16(hlen);
9335 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9336 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9337 h_tx_bd->addr_lo, h_tx_bd->nbd);
9339 /* now get a new data BD
9340 * (after the pbd) and fill it */
9341 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9342 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9344 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9345 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9347 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9348 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9349 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9351 /* this marks the BD as one that has no individual mapping
9352 * the FW ignores this flag in a BD not marked start
9354 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9355 DP(NETIF_MSG_TX_QUEUED,
9356 "TSO split data size is %d (%x:%x)\n",
9357 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9359 /* point tx_bd at the new data BD so the caller can mark it as last */
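/* The stack may have computed skb->csum starting "fix" bytes away from
 * the transport header (SKB_CS_OFF()).  Fold that misplaced span back
 * out (fix > 0) or in (fix < 0) via csum_sub()/csum_add() with
 * csum_partial() over the span, so the result equals a checksum taken
 * exactly at t_header; the final swab16() puts it in the byte order the
 * parsing BD expects.
 */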
9365 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9368 csum = (u16) ~csum_fold(csum_sub(csum,
9369 csum_partial(t_header - fix, fix, 0)));
9372 csum = (u16) ~csum_fold(csum_add(csum,
9373 csum_partial(t_header, -fix, 0)));
9375 return swab16(csum);
9378 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9382 if (skb->ip_summed != CHECKSUM_PARTIAL)
9386 if (skb->protocol == htons(ETH_P_IPV6)) {
9388 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9389 rc |= XMIT_CSUM_TCP;
9393 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9394 rc |= XMIT_CSUM_TCP;
9398 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9401 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9407 /* check if packet requires linearization (packet is too fragmented) */
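/* The FW fetches at most MAX_FETCH_BD BDs per packet, so for LSO every
 * window of (MAX_FETCH_BD - 3) consecutive data BDs must together carry
 * at least gso_size bytes; otherwise one segment could need more BDs
 * than the FW can fetch.  Worked example (hypothetical sizes): with
 * gso_size = 1400 and a window of ten 100-byte frags, the window sums
 * to 1000 < 1400 and the skb is linearized before transmit.
 */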
9408 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9413 int first_bd_sz = 0;
9415 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9416 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9418 if (xmit_type & XMIT_GSO) {
9419 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9420 /* Check if LSO packet needs to be copied:
9421 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9422 int wnd_size = MAX_FETCH_BD - 3;
9423 /* Number of windows to check */
9424 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9429 /* Headers length */
9430 hlen = (int)(skb_transport_header(skb) - skb->data) +
9433 /* Amount of data (w/o headers) in the linear part of the SKB */
9434 first_bd_sz = skb_headlen(skb) - hlen;
9436 wnd_sum = first_bd_sz;
9438 /* Calculate the first sum - it's special */
9439 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9441 skb_shinfo(skb)->frags[frag_idx].size;
9443 /* If there was data in the linear part of the skb - check it */
9444 if (first_bd_sz > 0) {
9445 if (unlikely(wnd_sum < lso_mss)) {
9450 wnd_sum -= first_bd_sz;
9453 /* Others are easier: run through the frag list and
9454 check all windows */
9455 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9457 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9459 if (unlikely(wnd_sum < lso_mss)) {
9464 skb_shinfo(skb)->frags[wnd_idx].size;
9468 /* in the non-LSO case, a too fragmented packet should always
9475 if (unlikely(to_copy))
9476 DP(NETIF_MSG_TX_QUEUED,
9477 "Linearization IS REQUIRED for %s packet. "
9478 "num_frags %d hlen %d first_bd_sz %d\n",
9479 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9480 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9485 /* called with netif_tx_lock
9486 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9487 * netif_wake_queue()
9489 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9491 struct bnx2x *bp = netdev_priv(dev);
9492 struct bnx2x_fastpath *fp;
9493 struct sw_tx_bd *tx_buf;
9494 struct eth_tx_bd *tx_bd;
9495 struct eth_tx_parse_bd *pbd = NULL;
9496 u16 pkt_prod, bd_prod;
9499 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9500 int vlan_off = (bp->e1hov ? 4 : 0);
9504 #ifdef BNX2X_STOP_ON_ERROR
9505 if (unlikely(bp->panic))
9506 return NETDEV_TX_BUSY;
9509 fp_index = (smp_processor_id() % bp->num_queues);
9510 fp = &bp->fp[fp_index];
9512 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9513 bp->eth_stats.driver_xoff++;
9514 netif_stop_queue(dev);
9515 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9516 return NETDEV_TX_BUSY;
9519 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9520 " gso type %x xmit_type %x\n",
9521 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9522 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9524 /* First, check if we need to linearize the skb
9525 (due to FW restrictions) */
9526 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9527 /* Statistics of linearization */
9529 if (skb_linearize(skb) != 0) {
9530 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9531 "silently dropping this SKB\n");
9532 dev_kfree_skb_any(skb);
9533 return NETDEV_TX_OK;
9538 Please read carefully. First we use one BD which we mark as start,
9539 then for TSO or xsum we have a parsing info BD,
9540 and only then we have the rest of the TSO BDs.
9541 (don't forget to mark the last one as last,
9542 and to unmap only AFTER you write to the BD ...)
9543 And above all, all pbd sizes are in words - NOT DWORDS!
9546 pkt_prod = fp->tx_pkt_prod++;
9547 bd_prod = TX_BD(fp->tx_bd_prod);
9549 /* get a tx_buf and first BD */
9550 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9551 tx_bd = &fp->tx_desc_ring[bd_prod];
9553 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9554 tx_bd->general_data = (UNICAST_ADDRESS <<
9555 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9557 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9559 /* remember the first BD of the packet */
9560 tx_buf->first_bd = fp->tx_bd_prod;
9563 DP(NETIF_MSG_TX_QUEUED,
9564 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9565 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9567 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9568 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9569 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9572 tx_bd->vlan = cpu_to_le16(pkt_prod);
9575 /* turn on parsing and get a BD */
9576 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9577 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9579 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9582 if (xmit_type & XMIT_CSUM) {
9583 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9585 /* for now NS flag is not used in Linux */
9586 pbd->global_data = (hlen |
9587 ((skb->protocol == htons(ETH_P_8021Q)) <<
9588 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9590 pbd->ip_hlen = (skb_transport_header(skb) -
9591 skb_network_header(skb)) / 2;
9593 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9595 pbd->total_hlen = cpu_to_le16(hlen);
9596 hlen = hlen*2 - vlan_off;
9598 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9600 if (xmit_type & XMIT_CSUM_V4)
9601 tx_bd->bd_flags.as_bitfield |=
9602 ETH_TX_BD_FLAGS_IP_CSUM;
9604 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9606 if (xmit_type & XMIT_CSUM_TCP) {
9607 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9610 s8 fix = SKB_CS_OFF(skb); /* signed! */
9612 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9613 pbd->cs_offset = fix / 2;
9615 DP(NETIF_MSG_TX_QUEUED,
9616 "hlen %d offset %d fix %d csum before fix %x\n",
9617 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9620 /* HW bug: fixup the CSUM */
9621 pbd->tcp_pseudo_csum =
9622 bnx2x_csum_fix(skb_transport_header(skb),
9625 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9626 pbd->tcp_pseudo_csum);
9630 mapping = pci_map_single(bp->pdev, skb->data,
9631 skb_headlen(skb), PCI_DMA_TODEVICE);
9633 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9634 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9635 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9636 tx_bd->nbd = cpu_to_le16(nbd);
9637 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9639 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9640 " nbytes %d flags %x vlan %x\n",
9641 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9642 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9643 le16_to_cpu(tx_bd->vlan));
9645 if (xmit_type & XMIT_GSO) {
9647 DP(NETIF_MSG_TX_QUEUED,
9648 "TSO packet len %d hlen %d total len %d tso size %d\n",
9649 skb->len, hlen, skb_headlen(skb),
9650 skb_shinfo(skb)->gso_size);
9652 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9654 if (unlikely(skb_headlen(skb) > hlen))
9655 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9658 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9659 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9660 pbd->tcp_flags = pbd_tcp_flags(skb);
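/* Note the zero length passed to the pseudo-header checksum helpers
 * below: for LSO the FW inserts the per-segment length itself, so the
 * pseudo checksum is computed without it (hence the
 * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag set afterwards).
 */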
9662 if (xmit_type & XMIT_GSO_V4) {
9663 pbd->ip_id = swab16(ip_hdr(skb)->id);
9664 pbd->tcp_pseudo_csum =
9665 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9667 0, IPPROTO_TCP, 0));
9670 pbd->tcp_pseudo_csum =
9671 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9672 &ipv6_hdr(skb)->daddr,
9673 0, IPPROTO_TCP, 0));
9675 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9678 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9679 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9681 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9682 tx_bd = &fp->tx_desc_ring[bd_prod];
9684 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9685 frag->size, PCI_DMA_TODEVICE);
9687 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9688 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9689 tx_bd->nbytes = cpu_to_le16(frag->size);
9690 tx_bd->vlan = cpu_to_le16(pkt_prod);
9691 tx_bd->bd_flags.as_bitfield = 0;
9693 DP(NETIF_MSG_TX_QUEUED,
9694 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9695 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9696 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9699 /* now at last mark the BD as the last BD */
9700 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9702 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9703 tx_bd, tx_bd->bd_flags.as_bitfield);
9705 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9707 /* now send a tx doorbell, counting the next BD
9708 * if the packet contains or ends with it
9710 if (TX_BD_POFF(bd_prod) < nbd)
9714 DP(NETIF_MSG_TX_QUEUED,
9715 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9716 " tcp_flags %x xsum %x seq %u hlen %u\n",
9717 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9718 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9719 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9721 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9724 * Make sure that the BD data is updated before updating the producer
9725 * since FW might read the BD right after the producer is updated.
9726 * This is only applicable to weak-ordered memory model archs such
9727 * as IA-64. The following barrier is also mandatory since the FW
9728 * will assume packets must have BDs.
9732 fp->hw_tx_prods->bds_prod =
9733 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9734 mb(); /* FW restriction: must not reorder writing nbd and packets */
9735 fp->hw_tx_prods->packets_prod =
9736 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9737 DOORBELL(bp, FP_IDX(fp), 0);
9741 fp->tx_bd_prod += nbd;
9742 dev->trans_start = jiffies;
9744 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9745 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9746 if we put Tx into XOFF state. */
9748 netif_stop_queue(dev);
9749 bp->eth_stats.driver_xoff++;
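/* re-check after stopping: bnx2x_tx_int() may have freed descriptors
 * between the availability test and netif_stop_queue(); waking here
 * closes that window and avoids a permanently stalled queue
 */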
9750 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9751 netif_wake_queue(dev);
9755 return NETDEV_TX_OK;
9758 /* called with rtnl_lock */
9759 static int bnx2x_open(struct net_device *dev)
9761 struct bnx2x *bp = netdev_priv(dev);
9763 bnx2x_set_power_state(bp, PCI_D0);
9765 return bnx2x_nic_load(bp, LOAD_OPEN);
9768 /* called with rtnl_lock */
9769 static int bnx2x_close(struct net_device *dev)
9771 struct bnx2x *bp = netdev_priv(dev);
9773 /* Unload the driver, release IRQs */
9774 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9775 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9776 if (!CHIP_REV_IS_SLOW(bp))
9777 bnx2x_set_power_state(bp, PCI_D3hot);
9782 /* called with netif_tx_lock from set_multicast */
9783 static void bnx2x_set_rx_mode(struct net_device *dev)
9785 struct bnx2x *bp = netdev_priv(dev);
9786 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9787 int port = BP_PORT(bp);
9789 if (bp->state != BNX2X_STATE_OPEN) {
9790 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9794 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9796 if (dev->flags & IFF_PROMISC)
9797 rx_mode = BNX2X_RX_MODE_PROMISC;
9799 else if ((dev->flags & IFF_ALLMULTI) ||
9800 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9801 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9803 else { /* some multicasts */
9804 if (CHIP_IS_E1(bp)) {
9806 struct dev_mc_list *mclist;
9807 struct mac_configuration_cmd *config =
9808 bnx2x_sp(bp, mcast_config);
9810 for (i = 0, mclist = dev->mc_list;
9811 mclist && (i < dev->mc_count);
9812 i++, mclist = mclist->next) {
9814 config->config_table[i].
9815 cam_entry.msb_mac_addr =
9816 swab16(*(u16 *)&mclist->dmi_addr[0]);
9817 config->config_table[i].
9818 cam_entry.middle_mac_addr =
9819 swab16(*(u16 *)&mclist->dmi_addr[2]);
9820 config->config_table[i].
9821 cam_entry.lsb_mac_addr =
9822 swab16(*(u16 *)&mclist->dmi_addr[4]);
9823 config->config_table[i].cam_entry.flags =
9825 config->config_table[i].
9826 target_table_entry.flags = 0;
9827 config->config_table[i].
9828 target_table_entry.client_id = 0;
9829 config->config_table[i].
9830 target_table_entry.vlan_id = 0;
9833 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9834 config->config_table[i].
9835 cam_entry.msb_mac_addr,
9836 config->config_table[i].
9837 cam_entry.middle_mac_addr,
9838 config->config_table[i].
9839 cam_entry.lsb_mac_addr);
9841 old = config->hdr.length_6b;
9843 for (; i < old; i++) {
9844 if (CAM_IS_INVALID(config->
9846 i--; /* already invalidated */
9850 CAM_INVALIDATE(config->
9855 if (CHIP_REV_IS_SLOW(bp))
9856 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9858 offset = BNX2X_MAX_MULTICAST*(1 + port);
9860 config->hdr.length_6b = i;
9861 config->hdr.offset = offset;
9862 config->hdr.client_id = BP_CL_ID(bp);
9863 config->hdr.reserved1 = 0;
9865 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9866 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9867 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9870 /* Accept one or more multicasts */
9871 struct dev_mc_list *mclist;
9872 u32 mc_filter[MC_HASH_SIZE];
9873 u32 crc, bit, regidx;
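/* Approximate multicast filtering (non-E1 path): hash each MAC with
 * CRC32C and use the top eight bits as an index into a 256-bit filter
 * spread over MC_HASH_SIZE 32-bit registers (bit >> 5 picks the
 * register, bit & 0x1f the bit within it).  False positives are
 * possible, false negatives are not.
 */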
9876 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9878 for (i = 0, mclist = dev->mc_list;
9879 mclist && (i < dev->mc_count);
9880 i++, mclist = mclist->next) {
9882 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9885 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9886 bit = (crc >> 24) & 0xff;
9889 mc_filter[regidx] |= (1 << bit);
9892 for (i = 0; i < MC_HASH_SIZE; i++)
9893 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9898 bp->rx_mode = rx_mode;
9899 bnx2x_set_storm_rx_mode(bp);
9902 /* called with rtnl_lock */
9903 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9905 struct sockaddr *addr = p;
9906 struct bnx2x *bp = netdev_priv(dev);
9908 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9911 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9912 if (netif_running(dev)) {
9914 bnx2x_set_mac_addr_e1(bp, 1);
9916 bnx2x_set_mac_addr_e1h(bp, 1);
9922 /* called with rtnl_lock */
9923 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9925 struct mii_ioctl_data *data = if_mii(ifr);
9926 struct bnx2x *bp = netdev_priv(dev);
9927 int port = BP_PORT(bp);
9932 data->phy_id = bp->port.phy_addr;
9939 if (!netif_running(dev))
9942 mutex_lock(&bp->port.phy_mutex);
9943 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9944 DEFAULT_PHY_DEV_ADDR,
9945 (data->reg_num & 0x1f), &mii_regval);
9946 data->val_out = mii_regval;
9947 mutex_unlock(&bp->port.phy_mutex);
9952 if (!capable(CAP_NET_ADMIN))
9955 if (!netif_running(dev))
9958 mutex_lock(&bp->port.phy_mutex);
9959 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9960 DEFAULT_PHY_DEV_ADDR,
9961 (data->reg_num & 0x1f), data->val_in);
9962 mutex_unlock(&bp->port.phy_mutex);
9973 /* called with rtnl_lock */
9974 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9976 struct bnx2x *bp = netdev_priv(dev);
9979 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9980 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9983 /* This does not race with packet allocation
9984 * because the actual alloc size is
9985 * only updated as part of load
9989 if (netif_running(dev)) {
9990 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9991 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9997 static void bnx2x_tx_timeout(struct net_device *dev)
9999 struct bnx2x *bp = netdev_priv(dev);
10001 #ifdef BNX2X_STOP_ON_ERROR
10005 /* This allows the netif to be shut down gracefully before resetting */
10006 schedule_work(&bp->reset_task);
10010 /* called with rtnl_lock */
10011 static void bnx2x_vlan_rx_register(struct net_device *dev,
10012 struct vlan_group *vlgrp)
10014 struct bnx2x *bp = netdev_priv(dev);
10017 if (netif_running(dev))
10018 bnx2x_set_client_config(bp);
10023 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10024 static void poll_bnx2x(struct net_device *dev)
10026 struct bnx2x *bp = netdev_priv(dev);
10028 disable_irq(bp->pdev->irq);
10029 bnx2x_interrupt(bp->pdev->irq, dev);
10030 enable_irq(bp->pdev->irq);
10034 static const struct net_device_ops bnx2x_netdev_ops = {
10035 .ndo_open = bnx2x_open,
10036 .ndo_stop = bnx2x_close,
10037 .ndo_start_xmit = bnx2x_start_xmit,
10038 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10039 .ndo_set_mac_address = bnx2x_change_mac_addr,
10040 .ndo_validate_addr = eth_validate_addr,
10041 .ndo_do_ioctl = bnx2x_ioctl,
10042 .ndo_change_mtu = bnx2x_change_mtu,
10043 .ndo_tx_timeout = bnx2x_tx_timeout,
10045 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10047 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10048 .ndo_poll_controller = poll_bnx2x,
10053 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10054 struct net_device *dev)
10059 SET_NETDEV_DEV(dev, &pdev->dev);
10060 bp = netdev_priv(dev);
10065 bp->func = PCI_FUNC(pdev->devfn);
10067 rc = pci_enable_device(pdev);
10069 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10073 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10074 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10077 goto err_out_disable;
10080 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10081 printk(KERN_ERR PFX "Cannot find second PCI device"
10082 " base address, aborting\n");
10084 goto err_out_disable;
10087 if (atomic_read(&pdev->enable_cnt) == 1) {
10088 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10090 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10092 goto err_out_disable;
10095 pci_set_master(pdev);
10096 pci_save_state(pdev);
10099 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10100 if (bp->pm_cap == 0) {
10101 printk(KERN_ERR PFX "Cannot find power management"
10102 " capability, aborting\n");
10104 goto err_out_release;
10107 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10108 if (bp->pcie_cap == 0) {
10109 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10112 goto err_out_release;
10115 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10116 bp->flags |= USING_DAC_FLAG;
10117 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10118 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10119 " failed, aborting\n");
10121 goto err_out_release;
10124 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10125 printk(KERN_ERR PFX "System does not support DMA,"
10128 goto err_out_release;
10131 dev->mem_start = pci_resource_start(pdev, 0);
10132 dev->base_addr = dev->mem_start;
10133 dev->mem_end = pci_resource_end(pdev, 0);
10135 dev->irq = pdev->irq;
10137 bp->regview = pci_ioremap_bar(pdev, 0);
10138 if (!bp->regview) {
10139 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10141 goto err_out_release;
10144 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10145 min_t(u64, BNX2X_DB_SIZE,
10146 pci_resource_len(pdev, 2)));
10147 if (!bp->doorbells) {
10148 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10150 goto err_out_unmap;
10153 bnx2x_set_power_state(bp, PCI_D0);
10155 /* clean indirect addresses */
10156 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10157 PCICFG_VENDOR_ID_OFFSET);
10158 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10159 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10160 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10161 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10163 dev->watchdog_timeo = TX_TIMEOUT;
10165 dev->netdev_ops = &bnx2x_netdev_ops;
10166 dev->ethtool_ops = &bnx2x_ethtool_ops;
10167 dev->features |= NETIF_F_SG;
10168 dev->features |= NETIF_F_HW_CSUM;
10169 if (bp->flags & USING_DAC_FLAG)
10170 dev->features |= NETIF_F_HIGHDMA;
10172 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10174 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10175 dev->features |= NETIF_F_TSO6;
10181 iounmap(bp->regview);
10182 bp->regview = NULL;
10184 if (bp->doorbells) {
10185 iounmap(bp->doorbells);
10186 bp->doorbells = NULL;
10190 if (atomic_read(&pdev->enable_cnt) == 1)
10191 pci_release_regions(pdev);
10194 pci_disable_device(pdev);
10195 pci_set_drvdata(pdev, NULL);
10201 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10203 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10205 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10209 /* return value of 1=2.5GHz 2=5GHz */
10210 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10212 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10214 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10218 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10219 const struct pci_device_id *ent)
10221 static int version_printed;
10222 struct net_device *dev = NULL;
10226 if (version_printed++ == 0)
10227 printk(KERN_INFO "%s", version);
10229 /* dev zeroed in init_etherdev */
10230 dev = alloc_etherdev(sizeof(*bp));
10232 printk(KERN_ERR PFX "Cannot allocate net device\n");
10236 bp = netdev_priv(dev);
10237 bp->msglevel = debug;
10239 rc = bnx2x_init_dev(pdev, dev);
10245 rc = register_netdev(dev);
10247 dev_err(&pdev->dev, "Cannot register net device\n");
10248 goto init_one_exit;
10251 pci_set_drvdata(pdev, dev);
10253 rc = bnx2x_init_bp(bp);
10255 unregister_netdev(dev);
10256 goto init_one_exit;
10259 netif_carrier_off(dev);
10261 bp->common.name = board_info[ent->driver_data].name;
10262 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10263 " IRQ %d, ", dev->name, bp->common.name,
10264 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10265 bnx2x_get_pcie_width(bp),
10266 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10267 dev->base_addr, bp->pdev->irq);
10268 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10273 iounmap(bp->regview);
10276 iounmap(bp->doorbells);
10280 if (atomic_read(&pdev->enable_cnt) == 1)
10281 pci_release_regions(pdev);
10283 pci_disable_device(pdev);
10284 pci_set_drvdata(pdev, NULL);
10289 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10291 struct net_device *dev = pci_get_drvdata(pdev);
10295 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10298 bp = netdev_priv(dev);
10300 unregister_netdev(dev);
10303 iounmap(bp->regview);
10306 iounmap(bp->doorbells);
10310 if (atomic_read(&pdev->enable_cnt) == 1)
10311 pci_release_regions(pdev);
10313 pci_disable_device(pdev);
10314 pci_set_drvdata(pdev, NULL);
10317 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10319 struct net_device *dev = pci_get_drvdata(pdev);
10323 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10326 bp = netdev_priv(dev);
10330 pci_save_state(pdev);
10332 if (!netif_running(dev)) {
10337 netif_device_detach(dev);
10339 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10341 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10348 static int bnx2x_resume(struct pci_dev *pdev)
10350 struct net_device *dev = pci_get_drvdata(pdev);
10355 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10358 bp = netdev_priv(dev);
10362 pci_restore_state(pdev);
10364 if (!netif_running(dev)) {
10369 bnx2x_set_power_state(bp, PCI_D0);
10370 netif_device_attach(dev);
10372 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10379 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10383 bp->state = BNX2X_STATE_ERROR;
10385 bp->rx_mode = BNX2X_RX_MODE_NONE;
10387 bnx2x_netif_stop(bp, 0);
10389 del_timer_sync(&bp->timer);
10390 bp->stats_state = STATS_STATE_DISABLED;
10391 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10394 bnx2x_free_irq(bp);
10396 if (CHIP_IS_E1(bp)) {
10397 struct mac_configuration_cmd *config =
10398 bnx2x_sp(bp, mcast_config);
10400 for (i = 0; i < config->hdr.length_6b; i++)
10401 CAM_INVALIDATE(config->config_table[i]);
10404 /* Free SKBs, SGEs, TPA pool and driver internals */
10405 bnx2x_free_skbs(bp);
10406 for_each_queue(bp, i)
10407 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10408 bnx2x_free_mem(bp);
10410 bp->state = BNX2X_STATE_CLOSED;
10412 netif_carrier_off(bp->dev);
10417 static void bnx2x_eeh_recover(struct bnx2x *bp)
10421 mutex_init(&bp->port.phy_mutex);
10423 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10424 bp->link_params.shmem_base = bp->common.shmem_base;
10425 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10427 if (!bp->common.shmem_base ||
10428 (bp->common.shmem_base < 0xA0000) ||
10429 (bp->common.shmem_base >= 0xC0000)) {
10430 BNX2X_DEV_INFO("MCP not active\n");
10431 bp->flags |= NO_MCP_FLAG;
10435 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10436 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10437 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10438 BNX2X_ERR("BAD MCP validity signature\n");
10440 if (!BP_NOMCP(bp)) {
10441 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10442 & DRV_MSG_SEQ_NUMBER_MASK);
10443 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10448 * bnx2x_io_error_detected - called when PCI error is detected
10449 * @pdev: Pointer to PCI device
10450 * @state: The current pci connection state
10452 * This function is called after a PCI bus error affecting
10453 * this device has been detected.
10455 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10456 pci_channel_state_t state)
10458 struct net_device *dev = pci_get_drvdata(pdev);
10459 struct bnx2x *bp = netdev_priv(dev);
10463 netif_device_detach(dev);
10465 if (netif_running(dev))
10466 bnx2x_eeh_nic_unload(bp);
10468 pci_disable_device(pdev);
10472 /* Request a slot reset */
10473 return PCI_ERS_RESULT_NEED_RESET;
10477 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10478 * @pdev: Pointer to PCI device
10480 * Restart the card from scratch, as if from a cold-boot.
10482 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10484 struct net_device *dev = pci_get_drvdata(pdev);
10485 struct bnx2x *bp = netdev_priv(dev);
10489 if (pci_enable_device(pdev)) {
10490 dev_err(&pdev->dev,
10491 "Cannot re-enable PCI device after reset\n");
10493 return PCI_ERS_RESULT_DISCONNECT;
10496 pci_set_master(pdev);
10497 pci_restore_state(pdev);
10499 if (netif_running(dev))
10500 bnx2x_set_power_state(bp, PCI_D0);
10504 return PCI_ERS_RESULT_RECOVERED;
10508 * bnx2x_io_resume - called when traffic can start flowing again
10509 * @pdev: Pointer to PCI device
10511 * This callback is called when the error recovery driver tells us that
10512 * it's OK to resume normal operation.
10514 static void bnx2x_io_resume(struct pci_dev *pdev)
10516 struct net_device *dev = pci_get_drvdata(pdev);
10517 struct bnx2x *bp = netdev_priv(dev);
10521 bnx2x_eeh_recover(bp);
10523 if (netif_running(dev))
10524 bnx2x_nic_load(bp, LOAD_NORMAL);
10526 netif_device_attach(dev);
10531 static struct pci_error_handlers bnx2x_err_handler = {
10532 .error_detected = bnx2x_io_error_detected,
10533 .slot_reset = bnx2x_io_slot_reset,
10534 .resume = bnx2x_io_resume,
10537 static struct pci_driver bnx2x_pci_driver = {
10538 .name = DRV_MODULE_NAME,
10539 .id_table = bnx2x_pci_tbl,
10540 .probe = bnx2x_init_one,
10541 .remove = __devexit_p(bnx2x_remove_one),
10542 .suspend = bnx2x_suspend,
10543 .resume = bnx2x_resume,
10544 .err_handler = &bnx2x_err_handler,
10547 static int __init bnx2x_init(void)
10549 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10550 if (bnx2x_wq == NULL) {
10551 printk(KERN_ERR PFX "Cannot create workqueue\n");
10555 return pci_register_driver(&bnx2x_pci_driver);
10558 static void __exit bnx2x_cleanup(void)
10560 pci_unregister_driver(&bnx2x_pci_driver);
10562 destroy_workqueue(bnx2x_wq);
10565 module_init(bnx2x_init);
10566 module_exit(bnx2x_cleanup);