1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
57 #define DRV_MODULE_VERSION "1.48.102"
58 #define DRV_MODULE_RELDATE "2009/02/12"
59 #define BNX2X_BC_VER 0x040200
61 /* Time in jiffies before concluding the transmitter is hung */
62 #define TX_TIMEOUT (5*HZ)
64 static char version[] __devinitdata =
65 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
66 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Eliezer Tamir");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int multi_mode = 1;
74 module_param(multi_mode, int, 0);
75 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
77 static int disable_tpa;
78 module_param(disable_tpa, int, 0);
79 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
82 module_param(int_mode, int, 0);
83 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
86 module_param(poll, int, 0);
87 MODULE_PARM_DESC(poll, " Use polling (for debug)");
90 module_param(mrrs, int, 0);
91 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
94 module_param(debug, int, 0);
95 MODULE_PARM_DESC(debug, " Default debug msglevel");
97 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
99 static struct workqueue_struct *bnx2x_wq;
101 enum bnx2x_board_type {
107 /* indexed by board_type, above */
110 } board_info[] __devinitdata = {
111 { "Broadcom NetXtreme II BCM57710 XGb" },
112 { "Broadcom NetXtreme II BCM57711 XGb" },
113 { "Broadcom NetXtreme II BCM57711E XGb" }
117 static const struct pci_device_id bnx2x_pci_tbl[] = {
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
127 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
129 /****************************************************************************
130 * General service functions
131 ****************************************************************************/
134 * locking is done by mcp
136 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
140 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
141 PCICFG_VENDOR_ID_OFFSET);
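/* Indirect register read via the PCI config GRC window: program the
 * window with the target GRC address, read the data register, then
 * park the window back at the vendor-ID offset (mirroring the
 * indirect write above).
 */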
144 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
149 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
150 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
151 PCICFG_VENDOR_ID_OFFSET);
156 static const u32 dmae_reg_go_c[] = {
157 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
158 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
159 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
160 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
163 /* copy command into DMAE command memory and set DMAE command go */
164 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
170 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
171 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
172 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
175 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
177 REG_WR(bp, dmae_reg_go_c[idx], 1);
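/* DMAE copy from host memory (dma_addr) into GRC address dst_addr,
 * len32 dwords long.  Completion is signalled by the DMAE block
 * writing DMAE_COMP_VAL to the slowpath wb_comp word, which is
 * polled below; if DMAE is not ready yet, the copy falls back to
 * indirect register writes.
 */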
180 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
183 struct dmae_command *dmae = &bp->init_dmae;
184 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
187 if (!bp->dmae_ready) {
188 u32 *data = bnx2x_sp(bp, wb_data[0]);
190 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
191 " using indirect\n", dst_addr, len32);
192 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
196 mutex_lock(&bp->dmae_mutex);
198 memset(dmae, 0, sizeof(struct dmae_command));
200 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
201 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
202 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
204 DMAE_CMD_ENDIANITY_B_DW_SWAP |
206 DMAE_CMD_ENDIANITY_DW_SWAP |
208 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
209 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
210 dmae->src_addr_lo = U64_LO(dma_addr);
211 dmae->src_addr_hi = U64_HI(dma_addr);
212 dmae->dst_addr_lo = dst_addr >> 2;
213 dmae->dst_addr_hi = 0;
215 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
216 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
217 dmae->comp_val = DMAE_COMP_VAL;
219 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
220 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
221 "dst_addr [%x:%08x (%08x)]\n"
222 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
223 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
224 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
225 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
226 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
227 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
228 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
232 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
236 while (*wb_comp != DMAE_COMP_VAL) {
237 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
240 BNX2X_ERR("dmae timeout!\n");
244 /* adjust delay for emulation/FPGA */
245 if (CHIP_REV_IS_SLOW(bp))
251 mutex_unlock(&bp->dmae_mutex);
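/* DMAE copy in the opposite direction: read len32 dwords from GRC
 * address src_addr into the slowpath wb_data buffer, polling wb_comp
 * for completion.  Falls back to indirect register reads when DMAE
 * is not ready.
 */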
254 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
256 struct dmae_command *dmae = &bp->init_dmae;
257 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
260 if (!bp->dmae_ready) {
261 u32 *data = bnx2x_sp(bp, wb_data[0]);
264 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
265 " using indirect\n", src_addr, len32);
266 for (i = 0; i < len32; i++)
267 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
271 mutex_lock(&bp->dmae_mutex);
273 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
274 memset(dmae, 0, sizeof(struct dmae_command));
276 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
277 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
278 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
280 DMAE_CMD_ENDIANITY_B_DW_SWAP |
282 DMAE_CMD_ENDIANITY_DW_SWAP |
284 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
285 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
286 dmae->src_addr_lo = src_addr >> 2;
287 dmae->src_addr_hi = 0;
288 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
289 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
291 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
292 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
293 dmae->comp_val = DMAE_COMP_VAL;
295 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
296 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
297 "dst_addr [%x:%08x (%08x)]\n"
298 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
299 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
300 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
301 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
305 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
309 while (*wb_comp != DMAE_COMP_VAL) {
312 BNX2X_ERR("dmae timeout!\n");
316 /* adjust delay for emulation/FPGA */
317 if (CHIP_REV_IS_SLOW(bp))
322 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
323 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
324 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
326 mutex_unlock(&bp->dmae_mutex);
329 /* used only for slowpath so not inlined */
330 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
334 wb_write[0] = val_hi;
335 wb_write[1] = val_lo;
336 REG_WR_DMAE(bp, reg, wb_write, 2);
340 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
344 REG_RD_DMAE(bp, reg, wb_data, 2);
346 return HILO_U64(wb_data[0], wb_data[1]);
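/* Scan the assert lists kept by each storm processor (XSTORM, TSTORM,
 * CSTORM, USTORM) in internal memory and print every entry whose
 * opcode is valid; used when producing a crash dump.
 */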
350 static int bnx2x_mc_assert(struct bnx2x *bp)
354 u32 row0, row1, row2, row3;
357 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
358 XSTORM_ASSERT_LIST_INDEX_OFFSET);
360 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
362 /* print the asserts */
363 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
365 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
366 XSTORM_ASSERT_LIST_OFFSET(i));
367 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
369 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
371 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
372 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
374 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
375 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
376 " 0x%08x 0x%08x 0x%08x\n",
377 i, row3, row2, row1, row0);
385 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
386 TSTORM_ASSERT_LIST_INDEX_OFFSET);
388 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
390 /* print the asserts */
391 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
393 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
394 TSTORM_ASSERT_LIST_OFFSET(i));
395 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
397 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
399 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
400 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
402 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
403 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
404 " 0x%08x 0x%08x 0x%08x\n",
405 i, row3, row2, row1, row0);
413 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
414 CSTORM_ASSERT_LIST_INDEX_OFFSET);
416 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
418 /* print the asserts */
419 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
421 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
422 CSTORM_ASSERT_LIST_OFFSET(i));
423 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
425 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
427 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
428 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
430 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
431 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
432 " 0x%08x 0x%08x 0x%08x\n",
433 i, row3, row2, row1, row0);
441 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
442 USTORM_ASSERT_LIST_INDEX_OFFSET);
444 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
446 /* print the asserts */
447 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
449 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
450 USTORM_ASSERT_LIST_OFFSET(i));
451 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
452 USTORM_ASSERT_LIST_OFFSET(i) + 4);
453 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
454 USTORM_ASSERT_LIST_OFFSET(i) + 8);
455 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
456 USTORM_ASSERT_LIST_OFFSET(i) + 12);
458 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
459 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
460 " 0x%08x 0x%08x 0x%08x\n",
461 i, row3, row2, row1, row0);
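/* Dump the firmware (MCP) trace from the scratchpad: the word at
 * offset 0xf104 marks the current write position, so the buffer is
 * printed from the mark to its end and then from its start back up
 * to the mark.
 */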
471 static void bnx2x_fw_dump(struct bnx2x *bp)
477 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
478 mark = ((mark + 0x3) & ~0x3);
479 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
481 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
482 for (word = 0; word < 8; word++)
483 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
486 printk(KERN_CONT "%s", (char *)data);
488 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
489 for (word = 0; word < 8; word++)
490 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
493 printk(KERN_CONT "%s", (char *)data);
495 printk("\n" KERN_ERR PFX "end of fw dump\n");
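/* Dump the driver's view of the hardware on a fatal error: default
 * status block indices, per-queue Rx/Tx producer/consumer values and
 * the descriptors around the current consumers.  Statistics
 * collection is stopped first.
 */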
498 static void bnx2x_panic_dump(struct bnx2x *bp)
503 bp->stats_state = STATS_STATE_DISABLED;
504 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
506 BNX2X_ERR("begin crash dump -----------------\n");
510 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
511 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
512 " spq_prod_idx(%u)\n",
513 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
514 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
517 for_each_rx_queue(bp, i) {
518 struct bnx2x_fastpath *fp = &bp->fp[i];
520 BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
521 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
522 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
523 i, fp->rx_bd_prod, fp->rx_bd_cons,
524 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
525 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
526 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
527 " fp_u_idx(%x) *sb_u_idx(%x)\n",
528 fp->rx_sge_prod, fp->last_max_sge,
529 le16_to_cpu(fp->fp_u_idx),
530 fp->status_blk->u_status_block.status_block_index);
534 for_each_tx_queue(bp, i) {
535 struct bnx2x_fastpath *fp = &bp->fp[i];
536 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
538 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
539 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
540 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
541 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
542 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
543 " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
544 fp->status_blk->c_status_block.status_block_index,
545 hw_prods->packets_prod, hw_prods->bds_prod);
550 for_each_rx_queue(bp, i) {
551 struct bnx2x_fastpath *fp = &bp->fp[i];
553 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
554 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
555 for (j = start; j != end; j = RX_BD(j + 1)) {
556 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
557 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
559 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
560 j, rx_bd[1], rx_bd[0], sw_bd->skb);
563 start = RX_SGE(fp->rx_sge_prod);
564 end = RX_SGE(fp->last_max_sge);
565 for (j = start; j != end; j = RX_SGE(j + 1)) {
566 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
567 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
569 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
570 j, rx_sge[1], rx_sge[0], sw_page->page);
573 start = RCQ_BD(fp->rx_comp_cons - 10);
574 end = RCQ_BD(fp->rx_comp_cons + 503);
575 for (j = start; j != end; j = RCQ_BD(j + 1)) {
576 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
578 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
579 j, cqe[0], cqe[1], cqe[2], cqe[3]);
584 for_each_tx_queue(bp, i) {
585 struct bnx2x_fastpath *fp = &bp->fp[i];
587 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
588 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
589 for (j = start; j != end; j = TX_BD(j + 1)) {
590 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
592 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
593 sw_bd->skb, sw_bd->first_bd);
596 start = TX_BD(fp->tx_bd_cons - 10);
597 end = TX_BD(fp->tx_bd_cons + 254);
598 for (j = start; j != end; j = TX_BD(j + 1)) {
599 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
601 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
602 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
608 BNX2X_ERR("end crash dump -----------------\n");
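/* Program the HC (host coalescing) config register of this port
 * according to the interrupt mode in use (MSI-X, MSI or INTx), and on
 * E1H also set up the leading/trailing edge registers so NIG and
 * GPIO3 attentions are enabled.
 */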
611 static void bnx2x_int_enable(struct bnx2x *bp)
613 int port = BP_PORT(bp);
614 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
615 u32 val = REG_RD(bp, addr);
616 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
617 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
620 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
621 HC_CONFIG_0_REG_INT_LINE_EN_0);
622 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
623 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
625 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
626 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
627 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
628 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
630 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
631 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
632 HC_CONFIG_0_REG_INT_LINE_EN_0 |
633 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
635 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
638 REG_WR(bp, addr, val);
640 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
643 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
644 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
646 REG_WR(bp, addr, val);
648 if (CHIP_IS_E1H(bp)) {
649 /* init leading/trailing edge */
651 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
653 /* enable nig and gpio3 attention */
658 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
659 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
663 static void bnx2x_int_disable(struct bnx2x *bp)
665 int port = BP_PORT(bp);
666 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
667 u32 val = REG_RD(bp, addr);
669 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
670 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
671 HC_CONFIG_0_REG_INT_LINE_EN_0 |
672 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
674 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
677 /* flush all outstanding writes */
680 REG_WR(bp, addr, val);
681 if (REG_RD(bp, addr) != val)
682 BNX2X_ERR("BUG! proper val not read from IGU!\n");
686 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
688 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
691 /* disable interrupt handling */
692 atomic_inc(&bp->intr_sem);
694 /* prevent the HW from sending interrupts */
695 bnx2x_int_disable(bp);
697 /* make sure all ISRs are done */
699 synchronize_irq(bp->msix_table[0].vector);
701 for_each_queue(bp, i)
702 synchronize_irq(bp->msix_table[i + offset].vector);
704 synchronize_irq(bp->pdev->irq);
706 /* make sure sp_task is not running */
707 cancel_delayed_work(&bp->sp_task);
708 flush_workqueue(bnx2x_wq);
714 * General service functions
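/* Acknowledge a status block towards the IGU: build an
 * igu_ack_register from the status block id, storm id, new index and
 * interrupt mode, and write it to the HC command register of this
 * port.
 */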
717 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
718 u8 storm, u16 index, u8 op, u8 update)
720 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
721 COMMAND_REG_INT_ACK);
722 struct igu_ack_register igu_ack;
724 igu_ack.status_block_index = index;
725 igu_ack.sb_id_and_flags =
726 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
727 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
728 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
729 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
731 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
732 (*(u32 *)&igu_ack), hc_addr);
733 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
736 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
738 struct host_status_block *fpsb = fp->status_blk;
741 barrier(); /* status block is written to by the chip */
742 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
743 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
746 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
747 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
753 static u16 bnx2x_ack_int(struct bnx2x *bp)
755 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
756 COMMAND_REG_SIMD_MASK);
757 u32 result = REG_RD(bp, hc_addr);
759 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
767 * fast path service functions
770 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
774 /* Tell compiler that status block fields can change */
776 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
777 return (fp->tx_pkt_cons != tx_cons_sb);
780 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
782 /* Tell compiler that consumer and producer can change */
784 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
787 /* free skb in the packet ring at pos idx
788 * return idx of last bd freed
790 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
793 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
794 struct eth_tx_bd *tx_bd;
795 struct sk_buff *skb = tx_buf->skb;
796 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
799 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
803 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
804 tx_bd = &fp->tx_desc_ring[bd_idx];
805 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
806 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
808 nbd = le16_to_cpu(tx_bd->nbd) - 1;
809 new_cons = nbd + tx_buf->first_bd;
810 #ifdef BNX2X_STOP_ON_ERROR
811 if (nbd > (MAX_SKB_FRAGS + 2)) {
812 BNX2X_ERR("BAD nbd!\n");
817 /* Skip a parse bd and the TSO split header bd
818 since they have no mapping */
820 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
822 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
823 ETH_TX_BD_FLAGS_TCP_CSUM |
824 ETH_TX_BD_FLAGS_SW_LSO)) {
826 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
827 tx_bd = &fp->tx_desc_ring[bd_idx];
828 /* is this a TSO split header bd? */
829 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
831 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
838 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
839 tx_bd = &fp->tx_desc_ring[bd_idx];
840 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
841 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
843 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
849 tx_buf->first_bd = 0;
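/* Number of Tx BDs still available to start_xmit().  The NUM_TX_RINGS
 * "next page" entries are counted as used so they are never handed
 * out, i.e. avail = tx_ring_size - (prod - cons + NUM_TX_RINGS).
 */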
855 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
861 barrier(); /* Tell compiler that prod and cons can change */
862 prod = fp->tx_bd_prod;
863 cons = fp->tx_bd_cons;
865 /* NUM_TX_RINGS = number of "next-page" entries
866 It will be used as a threshold */
867 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
869 #ifdef BNX2X_STOP_ON_ERROR
871 WARN_ON(used > fp->bp->tx_ring_size);
872 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
875 return (s16)(fp->bp->tx_ring_size) - used;
878 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
880 struct bnx2x *bp = fp->bp;
881 struct netdev_queue *txq;
882 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
885 #ifdef BNX2X_STOP_ON_ERROR
886 if (unlikely(bp->panic))
890 txq = netdev_get_tx_queue(bp->dev, fp->index);
891 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
892 sw_cons = fp->tx_pkt_cons;
894 while (sw_cons != hw_cons) {
897 pkt_cons = TX_BD(sw_cons);
899 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
901 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
902 hw_cons, sw_cons, pkt_cons);
904 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
906 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
909 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
914 fp->tx_pkt_cons = sw_cons;
915 fp->tx_bd_cons = bd_cons;
917 /* TBD need a thresh? */
918 if (unlikely(netif_tx_queue_stopped(txq))) {
920 __netif_tx_lock(txq, smp_processor_id());
922 /* Need to make the tx_bd_cons update visible to start_xmit()
923 * before checking for netif_tx_queue_stopped(). Without the
924 * memory barrier, there is a small possibility that
925 * start_xmit() will miss it and cause the queue to be stopped
930 if ((netif_tx_queue_stopped(txq)) &&
931 (bp->state == BNX2X_STATE_OPEN) &&
932 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
933 netif_tx_wake_queue(txq);
935 __netif_tx_unlock(txq);
940 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
941 union eth_rx_cqe *rr_cqe)
943 struct bnx2x *bp = fp->bp;
944 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
945 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
948 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
949 fp->index, cid, command, bp->state,
950 rr_cqe->ramrod_cqe.ramrod_type);
955 switch (command | fp->state) {
956 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
957 BNX2X_FP_STATE_OPENING):
958 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
960 fp->state = BNX2X_FP_STATE_OPEN;
963 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
964 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
966 fp->state = BNX2X_FP_STATE_HALTED;
970 BNX2X_ERR("unexpected MC reply (%d) "
971 "fp->state is %x\n", command, fp->state);
974 mb(); /* force bnx2x_wait_ramrod() to see the change */
978 switch (command | bp->state) {
979 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
980 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
981 bp->state = BNX2X_STATE_OPEN;
984 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
985 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
986 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
987 fp->state = BNX2X_FP_STATE_HALTED;
990 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
991 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
992 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
996 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
997 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
998 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
999 bp->set_mac_pending = 0;
1002 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1003 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1007 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1008 command, bp->state);
1011 mb(); /* force bnx2x_wait_ramrod() to see the change */
1014 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1015 struct bnx2x_fastpath *fp, u16 index)
1017 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1018 struct page *page = sw_buf->page;
1019 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1021 /* Skip "next page" elements */
1025 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1026 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1027 __free_pages(page, PAGES_PER_SGE_SHIFT);
1029 sw_buf->page = NULL;
1034 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1035 struct bnx2x_fastpath *fp, int last)
1039 for (i = 0; i < last; i++)
1040 bnx2x_free_rx_sge(bp, fp, i);
1043 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1044 struct bnx2x_fastpath *fp, u16 index)
1046 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1047 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1048 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1051 if (unlikely(page == NULL))
1054 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1055 PCI_DMA_FROMDEVICE);
1056 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1057 __free_pages(page, PAGES_PER_SGE_SHIFT);
1061 sw_buf->page = page;
1062 pci_unmap_addr_set(sw_buf, mapping, mapping);
1064 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1065 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1070 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1071 struct bnx2x_fastpath *fp, u16 index)
1073 struct sk_buff *skb;
1074 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1075 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1078 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1079 if (unlikely(skb == NULL))
1082 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1083 PCI_DMA_FROMDEVICE);
1084 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1090 pci_unmap_addr_set(rx_buf, mapping, mapping);
1092 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1093 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1098 /* note that we are not allocating a new skb,
1099 * we are just moving one from cons to prod
1100 * we are not creating a new mapping,
1101 * so there is no need to check for dma_mapping_error().
1103 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1104 struct sk_buff *skb, u16 cons, u16 prod)
1106 struct bnx2x *bp = fp->bp;
1107 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1108 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1109 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1110 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1112 pci_dma_sync_single_for_device(bp->pdev,
1113 pci_unmap_addr(cons_rx_buf, mapping),
1114 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1116 prod_rx_buf->skb = cons_rx_buf->skb;
1117 pci_unmap_addr_set(prod_rx_buf, mapping,
1118 pci_unmap_addr(cons_rx_buf, mapping));
1119 *prod_bd = *cons_bd;
1122 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1125 u16 last_max = fp->last_max_sge;
1127 if (SUB_S16(idx, last_max) > 0)
1128 fp->last_max_sge = idx;
1131 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1135 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1136 int idx = RX_SGE_CNT * i - 1;
1138 for (j = 0; j < 2; j++) {
1139 SGE_MASK_CLEAR_BIT(fp, idx);
1145 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1146 struct eth_fast_path_rx_cqe *fp_cqe)
1148 struct bnx2x *bp = fp->bp;
1149 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1150 le16_to_cpu(fp_cqe->len_on_bd)) >>
1152 u16 last_max, last_elem, first_elem;
1159 /* First mark all used pages */
1160 for (i = 0; i < sge_len; i++)
1161 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1163 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1164 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1166 /* Here we assume that the last SGE index is the biggest */
1167 prefetch((void *)(fp->sge_mask));
1168 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1170 last_max = RX_SGE(fp->last_max_sge);
1171 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1172 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1174 /* If ring is not full */
1175 if (last_elem + 1 != first_elem)
1178 /* Now update the prod */
1179 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1180 if (likely(fp->sge_mask[i]))
1183 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1184 delta += RX_SGE_MASK_ELEM_SZ;
1188 fp->rx_sge_prod += delta;
1189 /* clear page-end entries */
1190 bnx2x_clear_sge_mask_next_elems(fp);
1193 DP(NETIF_MSG_RX_STATUS,
1194 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1195 fp->last_max_sge, fp->rx_sge_prod);
1198 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1200 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1201 memset(fp->sge_mask, 0xff,
1202 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1204 /* Clear the two last indices in the page to 1:
1205 these are the indices that correspond to the "next" element,
1206 hence will never be indicated and should be removed from
1207 the calculations. */
1208 bnx2x_clear_sge_mask_next_elems(fp);
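/* Start of a TPA (LRO) aggregation: an empty skb from the per-queue
 * TPA pool is mapped and placed at the producer slot, while the skb
 * holding the first (partial) data is parked in the pool until
 * bnx2x_tpa_stop() completes the aggregation.
 */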
1211 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1212 struct sk_buff *skb, u16 cons, u16 prod)
1214 struct bnx2x *bp = fp->bp;
1215 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1216 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1220 /* move empty skb from pool to prod and map it */
1221 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1222 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1223 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1224 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1226 /* move partial skb from cons to pool (don't unmap yet) */
1227 fp->tpa_pool[queue] = *cons_rx_buf;
1229 /* mark bin state as start - print error if current state != stop */
1230 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1231 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1233 fp->tpa_state[queue] = BNX2X_TPA_START;
1235 /* point prod_bd to new skb */
1236 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1237 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1239 #ifdef BNX2X_STOP_ON_ERROR
1240 fp->tpa_queue_used |= (1 << queue);
1241 #ifdef __powerpc64__
1242 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1244 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1246 fp->tpa_queue_used);
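/* Attach the SGE pages listed in the fast-path CQE to the aggregated
 * skb as page fragments, allocating a replacement page for each ring
 * slot that is consumed.  gso_size is set so the stack can resegment
 * the packet when forwarding.
 */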
1250 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1251 struct sk_buff *skb,
1252 struct eth_fast_path_rx_cqe *fp_cqe,
1255 struct sw_rx_page *rx_pg, old_rx_pg;
1256 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1257 u32 i, frag_len, frag_size, pages;
1261 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1262 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1264 /* This is needed in order to enable forwarding support */
1266 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1267 max(frag_size, (u32)len_on_bd));
1269 #ifdef BNX2X_STOP_ON_ERROR
1271 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1272 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1274 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1275 fp_cqe->pkt_len, len_on_bd);
1281 /* Run through the SGL and compose the fragmented skb */
1282 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1283 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1285 /* FW gives the indices of the SGE as if the ring is an array
1286 (meaning that "next" element will consume 2 indices) */
1287 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1288 rx_pg = &fp->rx_page_ring[sge_idx];
1291 /* If we fail to allocate a substitute page, we simply stop
1292 where we are and drop the whole packet */
1293 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1294 if (unlikely(err)) {
1295 fp->eth_q_stats.rx_skb_alloc_failed++;
1299 /* Unmap the page as we are going to pass it to the stack */
1300 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1301 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1303 /* Add one frag and update the appropriate fields in the skb */
1304 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1306 skb->data_len += frag_len;
1307 skb->truesize += frag_len;
1308 skb->len += frag_len;
1310 frag_size -= frag_len;
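/* End of a TPA aggregation: the pooled skb is unmapped, its IP
 * checksum is fixed up and the SGE pages are attached before it is
 * passed to the stack, while a freshly allocated skb takes its place
 * in the pool.  If either allocation fails, the packet is dropped.
 */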
1316 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1317 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1320 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1321 struct sk_buff *skb = rx_buf->skb;
1323 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1325 /* Unmap skb in the pool anyway, as we are going to change
1326 pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
1328 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1329 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1331 if (likely(new_skb)) {
1332 /* fix ip xsum and give it to the stack */
1333 /* (no need to map the new skb) */
1336 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1337 PARSING_FLAGS_VLAN);
1338 int is_not_hwaccel_vlan_cqe =
1339 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1343 prefetch(((char *)(skb)) + 128);
1345 #ifdef BNX2X_STOP_ON_ERROR
1346 if (pad + len > bp->rx_buf_size) {
1347 BNX2X_ERR("skb_put is about to fail... "
1348 "pad %d len %d rx_buf_size %d\n",
1349 pad, len, bp->rx_buf_size);
1355 skb_reserve(skb, pad);
1358 skb->protocol = eth_type_trans(skb, bp->dev);
1359 skb->ip_summed = CHECKSUM_UNNECESSARY;
1364 iph = (struct iphdr *)skb->data;
1366 /* If there is no Rx VLAN offloading -
1367 take VLAN tag into an account */
1368 if (unlikely(is_not_hwaccel_vlan_cqe))
1369 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1372 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1375 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1376 &cqe->fast_path_cqe, cqe_idx)) {
1378 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1379 (!is_not_hwaccel_vlan_cqe))
1380 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1381 le16_to_cpu(cqe->fast_path_cqe.
1385 netif_receive_skb(skb);
1387 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1388 " - dropping packet!\n");
1393 /* put new skb in bin */
1394 fp->tpa_pool[queue].skb = new_skb;
1397 /* else drop the packet and keep the buffer in the bin */
1398 DP(NETIF_MSG_RX_STATUS,
1399 "Failed to allocate new skb - dropping packet!\n");
1400 fp->eth_q_stats.rx_skb_alloc_failed++;
1403 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1406 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1407 struct bnx2x_fastpath *fp,
1408 u16 bd_prod, u16 rx_comp_prod,
1411 struct ustorm_eth_rx_producers rx_prods = {0};
1414 /* Update producers */
1415 rx_prods.bd_prod = bd_prod;
1416 rx_prods.cqe_prod = rx_comp_prod;
1417 rx_prods.sge_prod = rx_sge_prod;
1420 * Make sure that the BD and SGE data is updated before updating the
1421 * producers since FW might read the BD/SGE right after the producer
1423 * This is only applicable for weak-ordered memory model archs such
1424 * as IA-64. The following barrier is also mandatory since FW will
1425 * assume BDs must have buffers.
1429 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1430 REG_WR(bp, BAR_USTRORM_INTMEM +
1431 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1432 ((u32 *)&rx_prods)[i]);
1434 mmiowb(); /* keep prod updates ordered */
1436 DP(NETIF_MSG_RX_STATUS,
1437 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1438 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
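/* Main Rx completion handler (NAPI poll body): walk the RCQ for up to
 * 'budget' packets, handling slowpath CQEs, TPA start/stop events and
 * regular packets, then publish the new BD/CQE/SGE producers to the
 * ustorm.
 */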
1441 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1443 struct bnx2x *bp = fp->bp;
1444 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1445 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1448 #ifdef BNX2X_STOP_ON_ERROR
1449 if (unlikely(bp->panic))
1453 /* CQ "next element" is of the size of the regular element,
1454 that's why it's ok here */
1455 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1456 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1459 bd_cons = fp->rx_bd_cons;
1460 bd_prod = fp->rx_bd_prod;
1461 bd_prod_fw = bd_prod;
1462 sw_comp_cons = fp->rx_comp_cons;
1463 sw_comp_prod = fp->rx_comp_prod;
1465 /* Memory barrier necessary as speculative reads of the rx
1466 * buffer can be ahead of the index in the status block
1470 DP(NETIF_MSG_RX_STATUS,
1471 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1472 fp->index, hw_comp_cons, sw_comp_cons);
1474 while (sw_comp_cons != hw_comp_cons) {
1475 struct sw_rx_bd *rx_buf = NULL;
1476 struct sk_buff *skb;
1477 union eth_rx_cqe *cqe;
1481 comp_ring_cons = RCQ_BD(sw_comp_cons);
1482 bd_prod = RX_BD(bd_prod);
1483 bd_cons = RX_BD(bd_cons);
1485 cqe = &fp->rx_comp_ring[comp_ring_cons];
1486 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1488 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1489 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1490 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1491 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1492 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1493 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1495 /* is this a slowpath msg? */
1496 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1497 bnx2x_sp_event(fp, cqe);
1500 /* this is an rx packet */
1502 rx_buf = &fp->rx_buf_ring[bd_cons];
1504 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1505 pad = cqe->fast_path_cqe.placement_offset;
1507 /* If CQE is marked both TPA_START and TPA_END
1508 it is a non-TPA CQE */
1509 if ((!fp->disable_tpa) &&
1510 (TPA_TYPE(cqe_fp_flags) !=
1511 (TPA_TYPE_START | TPA_TYPE_END))) {
1512 u16 queue = cqe->fast_path_cqe.queue_index;
1514 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1515 DP(NETIF_MSG_RX_STATUS,
1516 "calling tpa_start on queue %d\n",
1519 bnx2x_tpa_start(fp, queue, skb,
1524 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1525 DP(NETIF_MSG_RX_STATUS,
1526 "calling tpa_stop on queue %d\n",
1529 if (!BNX2X_RX_SUM_FIX(cqe))
1530 BNX2X_ERR("STOP on non-TCP "
1533 /* This is the size of the linear data
1535 len = le16_to_cpu(cqe->fast_path_cqe.
1537 bnx2x_tpa_stop(bp, fp, queue, pad,
1538 len, cqe, comp_ring_cons);
1539 #ifdef BNX2X_STOP_ON_ERROR
1544 bnx2x_update_sge_prod(fp,
1545 &cqe->fast_path_cqe);
1550 pci_dma_sync_single_for_device(bp->pdev,
1551 pci_unmap_addr(rx_buf, mapping),
1552 pad + RX_COPY_THRESH,
1553 PCI_DMA_FROMDEVICE);
1555 prefetch(((char *)(skb)) + 128);
1557 /* is this an error packet? */
1558 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1559 DP(NETIF_MSG_RX_ERR,
1560 "ERROR flags %x rx packet %u\n",
1561 cqe_fp_flags, sw_comp_cons);
1562 fp->eth_q_stats.rx_err_discard_pkt++;
1566 /* Since we don't have a jumbo ring
1567 * copy small packets if mtu > 1500
1569 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1570 (len <= RX_COPY_THRESH)) {
1571 struct sk_buff *new_skb;
1573 new_skb = netdev_alloc_skb(bp->dev,
1575 if (new_skb == NULL) {
1576 DP(NETIF_MSG_RX_ERR,
1577 "ERROR packet dropped "
1578 "because of alloc failure\n");
1579 fp->eth_q_stats.rx_skb_alloc_failed++;
1584 skb_copy_from_linear_data_offset(skb, pad,
1585 new_skb->data + pad, len);
1586 skb_reserve(new_skb, pad);
1587 skb_put(new_skb, len);
1589 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1593 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1594 pci_unmap_single(bp->pdev,
1595 pci_unmap_addr(rx_buf, mapping),
1597 PCI_DMA_FROMDEVICE);
1598 skb_reserve(skb, pad);
1602 DP(NETIF_MSG_RX_ERR,
1603 "ERROR packet dropped because "
1604 "of alloc failure\n");
1605 fp->eth_q_stats.rx_skb_alloc_failed++;
1607 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1611 skb->protocol = eth_type_trans(skb, bp->dev);
1613 skb->ip_summed = CHECKSUM_NONE;
1615 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1616 skb->ip_summed = CHECKSUM_UNNECESSARY;
1618 fp->eth_q_stats.hw_csum_err++;
1622 skb_record_rx_queue(skb, fp->index);
1624 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1625 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1626 PARSING_FLAGS_VLAN))
1627 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1628 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1631 netif_receive_skb(skb);
1637 bd_cons = NEXT_RX_IDX(bd_cons);
1638 bd_prod = NEXT_RX_IDX(bd_prod);
1639 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1642 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1643 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1645 if (rx_pkt == budget)
1649 fp->rx_bd_cons = bd_cons;
1650 fp->rx_bd_prod = bd_prod_fw;
1651 fp->rx_comp_cons = sw_comp_cons;
1652 fp->rx_comp_prod = sw_comp_prod;
1654 /* Update producers */
1655 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1658 fp->rx_pkt += rx_pkt;
1664 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1666 struct bnx2x_fastpath *fp = fp_cookie;
1667 struct bnx2x *bp = fp->bp;
1668 int index = fp->index;
1670 /* Return here if interrupt is disabled */
1671 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1672 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1676 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1678 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1680 #ifdef BNX2X_STOP_ON_ERROR
1681 if (unlikely(bp->panic))
1685 prefetch(fp->rx_cons_sb);
1686 prefetch(fp->tx_cons_sb);
1687 prefetch(&fp->status_blk->c_status_block.status_block_index);
1688 prefetch(&fp->status_blk->u_status_block.status_block_index);
1690 napi_schedule(&bnx2x_fp(bp, index, napi));
1695 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1697 struct bnx2x *bp = netdev_priv(dev_instance);
1698 u16 status = bnx2x_ack_int(bp);
1701 /* Return here if interrupt is shared and it's not for us */
1702 if (unlikely(status == 0)) {
1703 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1706 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1708 /* Return here if interrupt is disabled */
1709 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1710 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1714 #ifdef BNX2X_STOP_ON_ERROR
1715 if (unlikely(bp->panic))
1719 mask = 0x2 << bp->fp[0].sb_id;
1720 if (status & mask) {
1721 struct bnx2x_fastpath *fp = &bp->fp[0];
1723 prefetch(fp->rx_cons_sb);
1724 prefetch(fp->tx_cons_sb);
1725 prefetch(&fp->status_blk->c_status_block.status_block_index);
1726 prefetch(&fp->status_blk->u_status_block.status_block_index);
1728 napi_schedule(&bnx2x_fp(bp, 0, napi));
1734 if (unlikely(status & 0x1)) {
1735 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1743 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1749 /* end of fast path */
1751 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1756 * General service functions
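/* Hardware resource locks shared with the management firmware: a lock
 * is taken by writing its bit to the per-function DRIVER_CONTROL
 * register and confirmed by reading it back, retrying every 5ms for
 * up to 5 seconds.
 */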
1759 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1762 u32 resource_bit = (1 << resource);
1763 int func = BP_FUNC(bp);
1764 u32 hw_lock_control_reg;
1767 /* Validating that the resource is within range */
1768 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1770 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1771 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1776 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1778 hw_lock_control_reg =
1779 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1782 /* Validating that the resource is not already taken */
1783 lock_status = REG_RD(bp, hw_lock_control_reg);
1784 if (lock_status & resource_bit) {
1785 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1786 lock_status, resource_bit);
1790 /* Try for 5 second every 5ms */
1791 for (cnt = 0; cnt < 1000; cnt++) {
1792 /* Try to acquire the lock */
1793 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1794 lock_status = REG_RD(bp, hw_lock_control_reg);
1795 if (lock_status & resource_bit)
1800 DP(NETIF_MSG_HW, "Timeout\n");
1804 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1807 u32 resource_bit = (1 << resource);
1808 int func = BP_FUNC(bp);
1809 u32 hw_lock_control_reg;
1811 /* Validating that the resource is within range */
1812 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1814 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1815 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1820 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1822 hw_lock_control_reg =
1823 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1826 /* Validating that the resource is currently taken */
1827 lock_status = REG_RD(bp, hw_lock_control_reg);
1828 if (!(lock_status & resource_bit)) {
1829 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1830 lock_status, resource_bit);
1834 REG_WR(bp, hw_lock_control_reg, resource_bit);
1838 /* HW Lock for shared dual port PHYs */
1839 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1841 mutex_lock(&bp->port.phy_mutex);
1843 if (bp->port.need_hw_lock)
1844 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1847 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1849 if (bp->port.need_hw_lock)
1850 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1852 mutex_unlock(&bp->port.phy_mutex);
1855 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1857 /* The GPIO should be swapped if swap register is set and active */
1858 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1859 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1860 int gpio_shift = gpio_num +
1861 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1862 u32 gpio_mask = (1 << gpio_shift);
1866 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1867 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1871 /* read GPIO value */
1872 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1874 /* get the requested pin value */
1875 if ((gpio_reg & gpio_mask) == gpio_mask)
1880 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1885 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1887 /* The GPIO should be swapped if swap register is set and active */
1888 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1889 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1890 int gpio_shift = gpio_num +
1891 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1892 u32 gpio_mask = (1 << gpio_shift);
1895 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1896 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1900 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1901 /* read GPIO and mask except the float bits */
1902 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1905 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1906 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1907 gpio_num, gpio_shift);
1908 /* clear FLOAT and set CLR */
1909 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1910 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1913 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1914 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1915 gpio_num, gpio_shift);
1916 /* clear FLOAT and set SET */
1917 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1918 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1921 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1922 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1923 gpio_num, gpio_shift);
1925 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1932 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1933 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1938 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1940 /* The GPIO should be swapped if swap register is set and active */
1941 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1942 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1943 int gpio_shift = gpio_num +
1944 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1945 u32 gpio_mask = (1 << gpio_shift);
1948 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1949 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1953 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1955 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1958 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1959 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1960 "output low\n", gpio_num, gpio_shift);
1961 /* clear SET and set CLR */
1962 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1963 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1966 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1967 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1968 "output high\n", gpio_num, gpio_shift);
1969 /* clear CLR and set SET */
1970 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1971 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1978 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1979 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1984 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1986 u32 spio_mask = (1 << spio_num);
1989 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1990 (spio_num > MISC_REGISTERS_SPIO_7)) {
1991 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1995 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1996 /* read SPIO and mask except the float bits */
1997 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2000 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2001 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2002 /* clear FLOAT and set CLR */
2003 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2004 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2007 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2008 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2009 /* clear FLOAT and set SET */
2010 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2011 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2014 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2015 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2017 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2024 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2025 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2030 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2032 switch (bp->link_vars.ieee_fc &
2033 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2034 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2035 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2039 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2040 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2044 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2045 bp->port.advertising |= ADVERTISED_Asym_Pause;
2049 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2055 static void bnx2x_link_report(struct bnx2x *bp)
2057 if (bp->link_vars.link_up) {
2058 if (bp->state == BNX2X_STATE_OPEN)
2059 netif_carrier_on(bp->dev);
2060 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2062 printk("%d Mbps ", bp->link_vars.line_speed);
2064 if (bp->link_vars.duplex == DUPLEX_FULL)
2065 printk("full duplex");
2067 printk("half duplex");
2069 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2070 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2071 printk(", receive ");
2072 if (bp->link_vars.flow_ctrl &
2074 printk("& transmit ");
2076 printk(", transmit ");
2078 printk("flow control ON");
2082 } else { /* link_down */
2083 netif_carrier_off(bp->dev);
2084 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2088 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2090 if (!BP_NOMCP(bp)) {
2093 /* Initialize link parameters structure variables */
2094 /* It is recommended to turn off RX FC for jumbo frames
2095 for better performance */
2097 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2098 else if (bp->dev->mtu > 5000)
2099 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2101 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2103 bnx2x_acquire_phy_lock(bp);
2105 if (load_mode == LOAD_DIAG)
2106 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2108 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2110 bnx2x_release_phy_lock(bp);
2112 bnx2x_calc_fc_adv(bp);
2114 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2115 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2116 bnx2x_link_report(bp);
2121 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2125 static void bnx2x_link_set(struct bnx2x *bp)
2127 if (!BP_NOMCP(bp)) {
2128 bnx2x_acquire_phy_lock(bp);
2129 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2130 bnx2x_release_phy_lock(bp);
2132 bnx2x_calc_fc_adv(bp);
2134 BNX2X_ERR("Bootcode is missing - can not set link\n");
2137 static void bnx2x__link_reset(struct bnx2x *bp)
2139 if (!BP_NOMCP(bp)) {
2140 bnx2x_acquire_phy_lock(bp);
2141 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2142 bnx2x_release_phy_lock(bp);
2144 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2147 static u8 bnx2x_link_test(struct bnx2x *bp)
2151 bnx2x_acquire_phy_lock(bp);
2152 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2153 bnx2x_release_phy_lock(bp);
2158 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2160 u32 r_param = bp->link_vars.line_speed / 8;
2161 u32 fair_periodic_timeout_usec;
2164 memset(&(bp->cmng.rs_vars), 0,
2165 sizeof(struct rate_shaping_vars_per_port));
2166 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2168 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2169 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2171 /* this is the threshold below which no timer arming will occur;
2172 the 1.25 coefficient makes the threshold a little bigger
2173 than the real time, to compensate for timer inaccuracy */
2174 bp->cmng.rs_vars.rs_threshold =
2175 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2177 /* resolution of fairness timer */
2178 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2179 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2180 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2182 /* this is the threshold below which we won't arm the timer anymore */
2183 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2185 /* we multiply by 1e3/8 to get bytes/msec.
2186 We don't want the credits to exceed
2187 t_fair*FAIR_MEM (the algorithm resolution) */
2188 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2189 /* since each tick is 4 usec */
2190 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
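/* Per-VN (virtual NIC) rate shaping and fairness parameters, derived
 * from the min/max bandwidth fields of the MF configuration (held in
 * units of 100 Mbps).  For example, a max rate of 2500 Mbps gives a
 * quota of 2500 * RS_PERIODIC_TIMEOUT_USEC / 8 bytes per period.
 * The results are written into XSTORM internal memory.
 */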
2193 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2195 struct rate_shaping_vars_per_vn m_rs_vn;
2196 struct fairness_vars_per_vn m_fair_vn;
2197 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2198 u16 vn_min_rate, vn_max_rate;
2201 /* If function is hidden - set min and max to zeroes */
2202 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2207 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2208 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2209 /* If fairness is enabled (not all min rates are zeroes) and
2210 if current min rate is zero - set it to 1.
2211 This is a requirement of the algorithm. */
2212 if (bp->vn_weight_sum && (vn_min_rate == 0))
2213 vn_min_rate = DEF_MIN_RATE;
2214 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2215 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2220 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2222 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2223 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2225 /* global vn counter - maximal Mbps for this vn */
2226 m_rs_vn.vn_counter.rate = vn_max_rate;
2228 /* quota - number of bytes transmitted in this period */
2229 m_rs_vn.vn_counter.quota =
2230 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
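/* Unit check: vn_max_rate is expressed in Mbps, i.e. bits per usec, so
rate * RS_PERIODIC_TIMEOUT_USEC[usec] / 8 yields the quota in bytes per
rate-shaping period. */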
2232 if (bp->vn_weight_sum) {
2233 /* credit for each period of the fairness algorithm:
2234 number of bytes in T_FAIR (the vn's share of the port rate).
2235 vn_weight_sum should not be larger than 10000, so
2236 T_FAIR_COEF / (8 * vn_weight_sum) is always greater than zero */
2238 m_fair_vn.vn_credit_delta =
2239 max((u32)(vn_min_rate * (T_FAIR_COEF /
2240 (8 * bp->vn_weight_sum))),
2241 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2242 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2243 m_fair_vn.vn_credit_delta);
2246 /* Store it to internal memory */
2247 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2248 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2250 ((u32 *)(&m_rs_vn))[i]);
2252 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2253 REG_WR(bp, BAR_XSTRORM_INTMEM +
2254 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2255 ((u32 *)(&m_fair_vn))[i]);
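/* Both per-VN structures are copied dword by dword into X-storm internal
memory at the per-function offsets, where the firmware rate-shaping and
fairness code picks them up. */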
2259 /* This function is called upon link interrupt */
2260 static void bnx2x_link_attn(struct bnx2x *bp)
2262 /* Make sure that we are synced with the current statistics */
2263 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2267 if (bp->link_vars.link_up) {
2269 /* dropless flow control */
2270 if (CHIP_IS_E1H(bp)) {
2271 int port = BP_PORT(bp);
2272 u32 pause_enabled = 0;
2274 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2277 REG_WR(bp, BAR_USTRORM_INTMEM +
2278 USTORM_PAUSE_ENABLED_OFFSET(port),
2282 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2283 struct host_port_stats *pstats;
2285 pstats = bnx2x_sp(bp, port_stats);
2286 /* reset old bmac stats */
2287 memset(&(pstats->mac_stx[0]), 0,
2288 sizeof(struct mac_stx));
2290 if ((bp->state == BNX2X_STATE_OPEN) ||
2291 (bp->state == BNX2X_STATE_DISABLED))
2292 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2295 /* indicate link status */
2296 bnx2x_link_report(bp);
2299 int port = BP_PORT(bp);
2303 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2304 if (vn == BP_E1HVN(bp))
2307 func = ((vn << 1) | port);
2309 /* Set the attention towards other drivers on the same port */
2311 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2312 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2315 if (bp->link_vars.link_up) {
2318 /* Init rate shaping and fairness contexts */
2319 bnx2x_init_port_minmax(bp);
2321 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2322 bnx2x_init_vn_minmax(bp, 2*vn + port);
2324 /* Store it to internal memory */
2326 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2327 REG_WR(bp, BAR_XSTRORM_INTMEM +
2328 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2329 ((u32 *)(&bp->cmng))[i]);
2334 static void bnx2x__link_status_update(struct bnx2x *bp)
2336 if (bp->state != BNX2X_STATE_OPEN)
2339 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2341 if (bp->link_vars.link_up)
2342 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2344 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2346 /* indicate link status */
2347 bnx2x_link_report(bp);
2350 static void bnx2x_pmf_update(struct bnx2x *bp)
2352 int port = BP_PORT(bp);
2356 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2358 /* enable nig attention */
2359 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2360 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2361 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2363 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2371 * General service functions
2374 /* the slow path queue is odd since completions arrive on the fastpath ring */
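/* Sketch of the flow in bnx2x_sp_post() below: the next producer BD is
claimed under spq_lock, filled with the command, CID and data, wrapped
back to the ring base once spq_last_bd is reached, and the new producer
index is then published to X-storm internal memory
(XSTORM_SPQ_PROD_OFFSET). spq_left tracks free entries; posting on a
full ring is treated as a driver bug. */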
2375 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2376 u32 data_hi, u32 data_lo, int common)
2378 int func = BP_FUNC(bp);
2380 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2381 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2382 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2383 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2384 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2386 #ifdef BNX2X_STOP_ON_ERROR
2387 if (unlikely(bp->panic))
2391 spin_lock_bh(&bp->spq_lock);
2393 if (!bp->spq_left) {
2394 BNX2X_ERR("BUG! SPQ ring full!\n");
2395 spin_unlock_bh(&bp->spq_lock);
2400 /* CID needs port number to be encoded in it */
2401 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2402 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2404 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2406 bp->spq_prod_bd->hdr.type |=
2407 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2409 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2410 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2414 if (bp->spq_prod_bd == bp->spq_last_bd) {
2415 bp->spq_prod_bd = bp->spq;
2416 bp->spq_prod_idx = 0;
2417 DP(NETIF_MSG_TIMER, "end of spq\n");
2424 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2427 spin_unlock_bh(&bp->spq_lock);
2431 /* acquire split MCP access lock register */
2432 static int bnx2x_acquire_alr(struct bnx2x *bp)
2439 for (j = 0; j < i*10; j++) {
2441 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2442 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2443 if (val & (1L << 31))
2448 if (!(val & (1L << 31))) {
2449 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2456 /* release split MCP access lock register */
2457 static void bnx2x_release_alr(struct bnx2x *bp)
2461 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
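/* bnx2x_update_dsb_idx() below compares the cached attention and storm
indices against the default status block written by the chip, refreshes
whichever ones changed, and reports to bnx2x_sp_task() which indices
were updated. */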
2464 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2466 struct host_def_status_block *def_sb = bp->def_status_blk;
2469 barrier(); /* status block is written to by the chip */
2470 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2471 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2474 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2475 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2478 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2479 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2482 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2483 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2486 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2487 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2494 * slow path service functions
2497 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2499 int port = BP_PORT(bp);
2500 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2501 COMMAND_REG_ATTN_BITS_SET);
2502 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2503 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2504 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2505 NIG_REG_MASK_INTERRUPT_PORT0;
2509 if (bp->attn_state & asserted)
2510 BNX2X_ERR("IGU ERROR\n");
2512 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2513 aeu_mask = REG_RD(bp, aeu_addr);
2515 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2516 aeu_mask, asserted);
2517 aeu_mask &= ~(asserted & 0xff);
2518 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2520 REG_WR(bp, aeu_addr, aeu_mask);
2521 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2523 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2524 bp->attn_state |= asserted;
2525 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2527 if (asserted & ATTN_HARD_WIRED_MASK) {
2528 if (asserted & ATTN_NIG_FOR_FUNC) {
2530 bnx2x_acquire_phy_lock(bp);
2532 /* save nig interrupt mask */
2533 nig_mask = REG_RD(bp, nig_int_mask_addr);
2534 REG_WR(bp, nig_int_mask_addr, 0);
2536 bnx2x_link_attn(bp);
2538 /* handle unicore attn? */
2540 if (asserted & ATTN_SW_TIMER_4_FUNC)
2541 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2543 if (asserted & GPIO_2_FUNC)
2544 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2546 if (asserted & GPIO_3_FUNC)
2547 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2549 if (asserted & GPIO_4_FUNC)
2550 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2553 if (asserted & ATTN_GENERAL_ATTN_1) {
2554 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2555 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2557 if (asserted & ATTN_GENERAL_ATTN_2) {
2558 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2559 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2561 if (asserted & ATTN_GENERAL_ATTN_3) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2566 if (asserted & ATTN_GENERAL_ATTN_4) {
2567 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2568 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2570 if (asserted & ATTN_GENERAL_ATTN_5) {
2571 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2572 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2574 if (asserted & ATTN_GENERAL_ATTN_6) {
2575 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2576 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2580 } /* if hardwired */
2582 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2584 REG_WR(bp, hc_addr, asserted);
2586 /* now set back the mask */
2587 if (asserted & ATTN_NIG_FOR_FUNC) {
2588 REG_WR(bp, nig_int_mask_addr, nig_mask);
2589 bnx2x_release_phy_lock(bp);
2593 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2595 int port = BP_PORT(bp);
2599 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2600 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2602 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2604 val = REG_RD(bp, reg_offset);
2605 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2606 REG_WR(bp, reg_offset, val);
2608 BNX2X_ERR("SPIO5 hw attention\n");
2610 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2611 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2612 /* Fan failure attention */
2614 /* The PHY reset is controlled by GPIO 1 */
2615 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2616 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2617 /* Low power mode is controlled by GPIO 2 */
2618 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2619 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2620 /* mark the failure */
2621 bp->link_params.ext_phy_config &=
2622 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2623 bp->link_params.ext_phy_config |=
2624 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2626 dev_info.port_hw_config[port].
2627 external_phy_config,
2628 bp->link_params.ext_phy_config);
2629 /* log the failure */
2630 printk(KERN_ERR PFX "Fan Failure on Network"
2631 " Controller %s has caused the driver to"
2632 " shutdown the card to prevent permanent"
2633 " damage. Please contact Dell Support for"
2634 " assistance\n", bp->dev->name);
2642 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2643 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2644 bnx2x_acquire_phy_lock(bp);
2645 bnx2x_handle_module_detect_int(&bp->link_params);
2646 bnx2x_release_phy_lock(bp);
2649 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2651 val = REG_RD(bp, reg_offset);
2652 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2653 REG_WR(bp, reg_offset, val);
2655 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2656 (attn & HW_INTERRUT_ASSERT_SET_0));
2661 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2665 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2667 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2668 BNX2X_ERR("DB hw attention 0x%x\n", val);
2669 /* DORQ discard attention */
2671 BNX2X_ERR("FATAL error from DORQ\n");
2674 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2676 int port = BP_PORT(bp);
2679 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2680 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2682 val = REG_RD(bp, reg_offset);
2683 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2684 REG_WR(bp, reg_offset, val);
2686 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2687 (attn & HW_INTERRUT_ASSERT_SET_1));
2692 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2696 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2698 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2699 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2700 /* CFC error attention */
2702 BNX2X_ERR("FATAL error from CFC\n");
2705 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2707 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2708 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2709 /* RQ_USDMDP_FIFO_OVERFLOW */
2711 BNX2X_ERR("FATAL error from PXP\n");
2714 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2716 int port = BP_PORT(bp);
2719 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2720 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2722 val = REG_RD(bp, reg_offset);
2723 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2724 REG_WR(bp, reg_offset, val);
2726 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2727 (attn & HW_INTERRUT_ASSERT_SET_2));
2732 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2736 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2738 if (attn & BNX2X_PMF_LINK_ASSERT) {
2739 int func = BP_FUNC(bp);
2741 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2742 bnx2x__link_status_update(bp);
2743 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2745 bnx2x_pmf_update(bp);
2747 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2749 BNX2X_ERR("MC assert!\n");
2750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2751 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2752 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2753 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2756 } else if (attn & BNX2X_MCP_ASSERT) {
2758 BNX2X_ERR("MCP assert!\n");
2759 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2763 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2766 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2767 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2768 if (attn & BNX2X_GRC_TIMEOUT) {
2769 val = CHIP_IS_E1H(bp) ?
2770 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2771 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2773 if (attn & BNX2X_GRC_RSV) {
2774 val = CHIP_IS_E1H(bp) ?
2775 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2776 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2778 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2782 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2784 struct attn_route attn;
2785 struct attn_route group_mask;
2786 int port = BP_PORT(bp);
2792 /* need to take HW lock because MCP or other port might also
2793 try to handle this event */
2794 bnx2x_acquire_alr(bp);
2796 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2797 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2798 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2799 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2800 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2801 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
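/* Each deasserted bit selects one dynamic attention group; the
after-invert signals are ANDed with that group's mask so every
deasserted* handler only sees the attention lines routed to its group. */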
2803 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2804 if (deasserted & (1 << index)) {
2805 group_mask = bp->attn_group[index];
2807 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2808 index, group_mask.sig[0], group_mask.sig[1],
2809 group_mask.sig[2], group_mask.sig[3]);
2811 bnx2x_attn_int_deasserted3(bp,
2812 attn.sig[3] & group_mask.sig[3]);
2813 bnx2x_attn_int_deasserted1(bp,
2814 attn.sig[1] & group_mask.sig[1]);
2815 bnx2x_attn_int_deasserted2(bp,
2816 attn.sig[2] & group_mask.sig[2]);
2817 bnx2x_attn_int_deasserted0(bp,
2818 attn.sig[0] & group_mask.sig[0]);
2820 if ((attn.sig[0] & group_mask.sig[0] &
2821 HW_PRTY_ASSERT_SET_0) ||
2822 (attn.sig[1] & group_mask.sig[1] &
2823 HW_PRTY_ASSERT_SET_1) ||
2824 (attn.sig[2] & group_mask.sig[2] &
2825 HW_PRTY_ASSERT_SET_2))
2826 BNX2X_ERR("FATAL HW block parity attention\n");
2830 bnx2x_release_alr(bp);
2832 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2835 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2837 REG_WR(bp, reg_addr, val);
2839 if (~bp->attn_state & deasserted)
2840 BNX2X_ERR("IGU ERROR\n");
2842 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2843 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2846 aeu_mask = REG_RD(bp, reg_addr);
2848 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2849 aeu_mask, deasserted);
2850 aeu_mask |= (deasserted & 0xff);
2851 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2853 REG_WR(bp, reg_addr, aeu_mask);
2854 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2856 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2857 bp->attn_state &= ~deasserted;
2858 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2861 static void bnx2x_attn_int(struct bnx2x *bp)
2863 /* read local copy of bits */
2864 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2866 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2868 u32 attn_state = bp->attn_state;
2870 /* look for changed bits */
2871 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2872 u32 deasserted = ~attn_bits & attn_ack & attn_state;
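/* A bit is "asserted" when the chip reports it set but it is neither
acknowledged nor in our cached state, and "deasserted" when the chip
reports it clear while it is still acknowledged and in our state. A bit
on which attn_bits and attn_ack agree but which differs from attn_state
means the driver lost track of it, hence the BAD attention state check
below. */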
2875 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2876 attn_bits, attn_ack, asserted, deasserted);
2878 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2879 BNX2X_ERR("BAD attention state\n");
2881 /* handle bits that were raised */
2883 bnx2x_attn_int_asserted(bp, asserted);
2886 bnx2x_attn_int_deasserted(bp, deasserted);
2889 static void bnx2x_sp_task(struct work_struct *work)
2891 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2895 /* Return here if interrupt is disabled */
2896 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2897 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2901 status = bnx2x_update_dsb_idx(bp);
2902 /* if (status == 0) */
2903 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2905 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2911 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2913 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2915 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2917 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2919 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2924 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2926 struct net_device *dev = dev_instance;
2927 struct bnx2x *bp = netdev_priv(dev);
2929 /* Return here if interrupt is disabled */
2930 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2931 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2935 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2937 #ifdef BNX2X_STOP_ON_ERROR
2938 if (unlikely(bp->panic))
2942 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2947 /* end of slow path */
2951 /****************************************************************************
2953 ****************************************************************************/
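/* The hardware and firmware export 64-bit counters as {hi, lo} pairs of
32-bit words, so the macros below do the 64-bit arithmetic by hand:
ADD_64 carries 1 into the high word when the low-word addition wraps
(s_lo < a_lo after the add), and DIFF_64 borrows from the high word when
m_lo < s_lo, computing d_lo = m_lo + (2^32 - s_lo). bnx2x_hilo() further
down folds such a pair back into a long (HILO_U64 on 64-bit kernels). */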
2955 /* sum[hi:lo] += add[hi:lo] */
2956 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2959 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2962 /* difference = minuend - subtrahend */
2963 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2965 if (m_lo < s_lo) { \
2967 d_hi = m_hi - s_hi; \
2969 /* we can 'loan' 1 */ \
2971 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2973 /* m_hi <= s_hi */ \
2978 /* m_lo >= s_lo */ \
2979 if (m_hi < s_hi) { \
2983 /* m_hi >= s_hi */ \
2984 d_hi = m_hi - s_hi; \
2985 d_lo = m_lo - s_lo; \
2990 #define UPDATE_STAT64(s, t) \
2992 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2993 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2994 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2995 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2996 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2997 pstats->mac_stx[1].t##_lo, diff.lo); \
3000 #define UPDATE_STAT64_NIG(s, t) \
3002 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3003 diff.lo, new->s##_lo, old->s##_lo); \
3004 ADD_64(estats->t##_hi, diff.hi, \
3005 estats->t##_lo, diff.lo); \
3008 /* sum[hi:lo] += add */
3009 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3012 s_hi += (s_lo < a) ? 1 : 0; \
3015 #define UPDATE_EXTEND_STAT(s) \
3017 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3018 pstats->mac_stx[1].s##_lo, \
3022 #define UPDATE_EXTEND_TSTAT(s, t) \
3024 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3025 old_tclient->s = tclient->s; \
3026 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3029 #define UPDATE_EXTEND_USTAT(s, t) \
3031 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3032 old_uclient->s = uclient->s; \
3033 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3036 #define UPDATE_EXTEND_XSTAT(s, t) \
3038 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3039 old_xclient->s = xclient->s; \
3040 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3043 /* minuend -= subtrahend */
3044 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3046 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3049 /* minuend[hi:lo] -= subtrahend */
3050 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3052 SUB_64(m_hi, 0, m_lo, s); \
3055 #define SUB_EXTEND_USTAT(s, t) \
3057 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3058 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3062 * General service functions
3065 static inline long bnx2x_hilo(u32 *hiref)
3067 u32 lo = *(hiref + 1);
3068 #if (BITS_PER_LONG == 64)
3071 return HILO_U64(hi, lo);
3078 * Init service functions
3081 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3083 if (!bp->stats_pending) {
3084 struct eth_query_ramrod_data ramrod_data = {0};
3087 ramrod_data.drv_counter = bp->stats_counter++;
3088 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3089 for_each_queue(bp, i)
3090 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3092 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3093 ((u32 *)&ramrod_data)[1],
3094 ((u32 *)&ramrod_data)[0], 0);
3096 /* stats ramrod has its own slot on the spq */
3098 bp->stats_pending = 1;
3103 static void bnx2x_stats_init(struct bnx2x *bp)
3105 int port = BP_PORT(bp);
3108 bp->stats_pending = 0;
3109 bp->executer_idx = 0;
3110 bp->stats_counter = 0;
3114 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3116 bp->port.port_stx = 0;
3117 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3119 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3120 bp->port.old_nig_stats.brb_discard =
3121 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3122 bp->port.old_nig_stats.brb_truncate =
3123 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3124 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3125 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3126 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3127 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3129 /* function stats */
3130 for_each_queue(bp, i) {
3131 struct bnx2x_fastpath *fp = &bp->fp[i];
3133 memset(&fp->old_tclient, 0,
3134 sizeof(struct tstorm_per_client_stats));
3135 memset(&fp->old_uclient, 0,
3136 sizeof(struct ustorm_per_client_stats));
3137 memset(&fp->old_xclient, 0,
3138 sizeof(struct xstorm_per_client_stats));
3139 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3142 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3143 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3145 bp->stats_state = STATS_STATE_DISABLED;
3146 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3147 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
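/* In bnx2x_hw_stats_post() below the individual statistics DMAE commands
prepared by the init routines live in the slowpath buffer
(bnx2x_sp(bp, dmae[...])). When executer_idx is non-zero a "loader"
command is built whose source is that array and whose destination is
DMAE command memory at slot loader_idx + 1; its completion write hits
dmae_reg_go_c[loader_idx + 1], so posting the loader launches the whole
chain. bnx2x_stats_comp() later polls stats_comp until the chain signals
DMAE_COMP_VAL. */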
3150 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3152 struct dmae_command *dmae = &bp->stats_dmae;
3153 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3155 *stats_comp = DMAE_COMP_VAL;
3156 if (CHIP_REV_IS_SLOW(bp))
3160 if (bp->executer_idx) {
3161 int loader_idx = PMF_DMAE_C(bp);
3163 memset(dmae, 0, sizeof(struct dmae_command));
3165 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3166 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3167 DMAE_CMD_DST_RESET |
3169 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3171 DMAE_CMD_ENDIANITY_DW_SWAP |
3173 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3175 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3176 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3177 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3178 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3179 sizeof(struct dmae_command) *
3180 (loader_idx + 1)) >> 2;
3181 dmae->dst_addr_hi = 0;
3182 dmae->len = sizeof(struct dmae_command) >> 2;
3185 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3186 dmae->comp_addr_hi = 0;
3190 bnx2x_post_dmae(bp, dmae, loader_idx);
3192 } else if (bp->func_stx) {
3194 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3198 static int bnx2x_stats_comp(struct bnx2x *bp)
3200 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3204 while (*stats_comp != DMAE_COMP_VAL) {
3206 BNX2X_ERR("timeout waiting for stats finished\n");
3216 * Statistics service functions
3219 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3221 struct dmae_command *dmae;
3223 int loader_idx = PMF_DMAE_C(bp);
3224 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3227 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3228 BNX2X_ERR("BUG!\n");
3232 bp->executer_idx = 0;
3234 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3236 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3238 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3240 DMAE_CMD_ENDIANITY_DW_SWAP |
3242 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3243 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3245 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3246 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3247 dmae->src_addr_lo = bp->port.port_stx >> 2;
3248 dmae->src_addr_hi = 0;
3249 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3250 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3251 dmae->len = DMAE_LEN32_RD_MAX;
3252 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3253 dmae->comp_addr_hi = 0;
3256 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3258 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3259 dmae->src_addr_hi = 0;
3260 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3261 DMAE_LEN32_RD_MAX * 4);
3262 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3263 DMAE_LEN32_RD_MAX * 4);
3264 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3265 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3266 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3267 dmae->comp_val = DMAE_COMP_VAL;
3270 bnx2x_hw_stats_post(bp);
3271 bnx2x_stats_comp(bp);
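/* bnx2x_port_stats_init() below builds the DMAE chain run on every stats
cycle for the PMF: host port stats out to the shmem port_stx area, host
function stats out to func_stx, the active MAC's hardware counters (BMAC
or EMAC register block) into host memory, then the NIG counters. The
intermediate commands complete by writing to a DMAE GO register so the
next command is started; only the last one completes into stats_comp. */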
3274 static void bnx2x_port_stats_init(struct bnx2x *bp)
3276 struct dmae_command *dmae;
3277 int port = BP_PORT(bp);
3278 int vn = BP_E1HVN(bp);
3280 int loader_idx = PMF_DMAE_C(bp);
3282 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3285 if (!bp->link_vars.link_up || !bp->port.pmf) {
3286 BNX2X_ERR("BUG!\n");
3290 bp->executer_idx = 0;
3293 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3294 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3299 DMAE_CMD_ENDIANITY_DW_SWAP |
3301 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3302 (vn << DMAE_CMD_E1HVN_SHIFT));
3304 if (bp->port.port_stx) {
3306 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3307 dmae->opcode = opcode;
3308 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3309 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3310 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3311 dmae->dst_addr_hi = 0;
3312 dmae->len = sizeof(struct host_port_stats) >> 2;
3313 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314 dmae->comp_addr_hi = 0;
3320 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321 dmae->opcode = opcode;
3322 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3323 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3324 dmae->dst_addr_lo = bp->func_stx >> 2;
3325 dmae->dst_addr_hi = 0;
3326 dmae->len = sizeof(struct host_func_stats) >> 2;
3327 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3328 dmae->comp_addr_hi = 0;
3333 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3334 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3337 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3339 DMAE_CMD_ENDIANITY_DW_SWAP |
3341 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342 (vn << DMAE_CMD_E1HVN_SHIFT));
3344 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3346 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3347 NIG_REG_INGRESS_BMAC0_MEM);
3349 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3350 BIGMAC_REGISTER_TX_STAT_GTBYT */
3351 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3352 dmae->opcode = opcode;
3353 dmae->src_addr_lo = (mac_addr +
3354 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3355 dmae->src_addr_hi = 0;
3356 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3357 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3358 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3359 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3360 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3361 dmae->comp_addr_hi = 0;
3364 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3365 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = opcode;
3368 dmae->src_addr_lo = (mac_addr +
3369 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3370 dmae->src_addr_hi = 0;
3371 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3372 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3373 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3374 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3375 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3376 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
3381 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3383 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3385 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3386 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3387 dmae->opcode = opcode;
3388 dmae->src_addr_lo = (mac_addr +
3389 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3390 dmae->src_addr_hi = 0;
3391 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3392 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3393 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3394 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395 dmae->comp_addr_hi = 0;
3398 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3399 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400 dmae->opcode = opcode;
3401 dmae->src_addr_lo = (mac_addr +
3402 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3403 dmae->src_addr_hi = 0;
3404 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3405 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3406 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3407 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3409 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3410 dmae->comp_addr_hi = 0;
3413 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3414 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3415 dmae->opcode = opcode;
3416 dmae->src_addr_lo = (mac_addr +
3417 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3418 dmae->src_addr_hi = 0;
3419 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3420 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3421 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3422 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3423 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3424 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3425 dmae->comp_addr_hi = 0;
3430 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3431 dmae->opcode = opcode;
3432 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3433 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3434 dmae->src_addr_hi = 0;
3435 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3436 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3437 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3438 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3439 dmae->comp_addr_hi = 0;
3442 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3443 dmae->opcode = opcode;
3444 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3445 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3446 dmae->src_addr_hi = 0;
3447 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3448 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3449 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3450 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3451 dmae->len = (2*sizeof(u32)) >> 2;
3452 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3453 dmae->comp_addr_hi = 0;
3456 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3457 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3458 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3459 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3461 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3463 DMAE_CMD_ENDIANITY_DW_SWAP |
3465 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3466 (vn << DMAE_CMD_E1HVN_SHIFT));
3467 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3468 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3469 dmae->src_addr_hi = 0;
3470 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3471 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3472 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3473 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3474 dmae->len = (2*sizeof(u32)) >> 2;
3475 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3476 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3477 dmae->comp_val = DMAE_COMP_VAL;
3482 static void bnx2x_func_stats_init(struct bnx2x *bp)
3484 struct dmae_command *dmae = &bp->stats_dmae;
3485 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3488 if (!bp->func_stx) {
3489 BNX2X_ERR("BUG!\n");
3493 bp->executer_idx = 0;
3494 memset(dmae, 0, sizeof(struct dmae_command));
3496 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3497 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3500 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3502 DMAE_CMD_ENDIANITY_DW_SWAP |
3504 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3506 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3507 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3508 dmae->dst_addr_lo = bp->func_stx >> 2;
3509 dmae->dst_addr_hi = 0;
3510 dmae->len = sizeof(struct host_func_stats) >> 2;
3511 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3512 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3513 dmae->comp_val = DMAE_COMP_VAL;
3518 static void bnx2x_stats_start(struct bnx2x *bp)
3521 bnx2x_port_stats_init(bp);
3523 else if (bp->func_stx)
3524 bnx2x_func_stats_init(bp);
3526 bnx2x_hw_stats_post(bp);
3527 bnx2x_storm_stats_post(bp);
3530 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3532 bnx2x_stats_comp(bp);
3533 bnx2x_stats_pmf_update(bp);
3534 bnx2x_stats_start(bp);
3537 static void bnx2x_stats_restart(struct bnx2x *bp)
3539 bnx2x_stats_comp(bp);
3540 bnx2x_stats_start(bp);
3543 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3545 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3546 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3553 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3554 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3555 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3556 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3557 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3558 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3559 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3560 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3561 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3562 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3563 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3564 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3565 UPDATE_STAT64(tx_stat_gt127,
3566 tx_stat_etherstatspkts65octetsto127octets);
3567 UPDATE_STAT64(tx_stat_gt255,
3568 tx_stat_etherstatspkts128octetsto255octets);
3569 UPDATE_STAT64(tx_stat_gt511,
3570 tx_stat_etherstatspkts256octetsto511octets);
3571 UPDATE_STAT64(tx_stat_gt1023,
3572 tx_stat_etherstatspkts512octetsto1023octets);
3573 UPDATE_STAT64(tx_stat_gt1518,
3574 tx_stat_etherstatspkts1024octetsto1522octets);
3575 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3576 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3577 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3578 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3579 UPDATE_STAT64(tx_stat_gterr,
3580 tx_stat_dot3statsinternalmactransmiterrors);
3581 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3583 estats->pause_frames_received_hi =
3584 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3585 estats->pause_frames_received_lo =
3586 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3588 estats->pause_frames_sent_hi =
3589 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3590 estats->pause_frames_sent_lo =
3591 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3594 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3596 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3597 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3598 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3600 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3601 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3602 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3603 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3605 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3606 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3607 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3608 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3609 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3610 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3611 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3612 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3613 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3614 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3615 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3616 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3617 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3618 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3620 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3621 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3622 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3623 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3627 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3628 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3629 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3630 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3632 estats->pause_frames_received_hi =
3633 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3634 estats->pause_frames_received_lo =
3635 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3636 ADD_64(estats->pause_frames_received_hi,
3637 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3638 estats->pause_frames_received_lo,
3639 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3641 estats->pause_frames_sent_hi =
3642 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3643 estats->pause_frames_sent_lo =
3644 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3645 ADD_64(estats->pause_frames_sent_hi,
3646 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3647 estats->pause_frames_sent_lo,
3648 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3651 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3653 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3654 struct nig_stats *old = &(bp->port.old_nig_stats);
3655 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3656 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3663 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3664 bnx2x_bmac_stats_update(bp);
3666 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3667 bnx2x_emac_stats_update(bp);
3669 else { /* unreached */
3670 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3674 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3675 new->brb_discard - old->brb_discard);
3676 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3677 new->brb_truncate - old->brb_truncate);
3679 UPDATE_STAT64_NIG(egress_mac_pkt0,
3680 etherstatspkts1024octetsto1522octets);
3681 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3683 memcpy(old, new, sizeof(struct nig_stats));
3685 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3686 sizeof(struct mac_stx));
3687 estats->brb_drop_hi = pstats->brb_drop_hi;
3688 estats->brb_drop_lo = pstats->brb_drop_lo;
3690 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3692 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3693 if (nig_timer_max != estats->nig_timer_max) {
3694 estats->nig_timer_max = nig_timer_max;
3695 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3701 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3703 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3704 struct tstorm_per_port_stats *tport =
3705 &stats->tstorm_common.port_statistics;
3706 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3707 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3710 memset(&(fstats->total_bytes_received_hi), 0,
3711 sizeof(struct host_func_stats) - 2*sizeof(u32));
3712 estats->error_bytes_received_hi = 0;
3713 estats->error_bytes_received_lo = 0;
3714 estats->etherstatsoverrsizepkts_hi = 0;
3715 estats->etherstatsoverrsizepkts_lo = 0;
3716 estats->no_buff_discard_hi = 0;
3717 estats->no_buff_discard_lo = 0;
3719 for_each_queue(bp, i) {
3720 struct bnx2x_fastpath *fp = &bp->fp[i];
3721 int cl_id = fp->cl_id;
3722 struct tstorm_per_client_stats *tclient =
3723 &stats->tstorm_common.client_statistics[cl_id];
3724 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3725 struct ustorm_per_client_stats *uclient =
3726 &stats->ustorm_common.client_statistics[cl_id];
3727 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3728 struct xstorm_per_client_stats *xclient =
3729 &stats->xstorm_common.client_statistics[cl_id];
3730 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3731 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
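/* bnx2x_storm_stats_post() tags each STAT_QUERY ramrod with
drv_counter = bp->stats_counter++, and each storm echoes that counter
into its per-client block. The checks below only accept a snapshot whose
counter matches the last query (counter + 1 == bp->stats_counter);
otherwise the update is abandoned for this round and retried on the next
tick (bnx2x_stats_update() counts such misses in stats_pending). */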
3734 /* are storm stats valid? */
3735 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3736 bp->stats_counter) {
3737 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3738 " xstorm counter (%d) != stats_counter (%d)\n",
3739 i, xclient->stats_counter, bp->stats_counter);
3742 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3743 bp->stats_counter) {
3744 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3745 " tstorm counter (%d) != stats_counter (%d)\n",
3746 i, tclient->stats_counter, bp->stats_counter);
3749 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3750 bp->stats_counter) {
3751 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3752 " ustorm counter (%d) != stats_counter (%d)\n",
3753 i, uclient->stats_counter, bp->stats_counter);
3757 qstats->total_bytes_received_hi =
3758 qstats->valid_bytes_received_hi =
3759 le32_to_cpu(tclient->total_rcv_bytes.hi);
3760 qstats->total_bytes_received_lo =
3761 qstats->valid_bytes_received_lo =
3762 le32_to_cpu(tclient->total_rcv_bytes.lo);
3764 qstats->error_bytes_received_hi =
3765 le32_to_cpu(tclient->rcv_error_bytes.hi);
3766 qstats->error_bytes_received_lo =
3767 le32_to_cpu(tclient->rcv_error_bytes.lo);
3769 ADD_64(qstats->total_bytes_received_hi,
3770 qstats->error_bytes_received_hi,
3771 qstats->total_bytes_received_lo,
3772 qstats->error_bytes_received_lo);
3774 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3775 total_unicast_packets_received);
3776 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3777 total_multicast_packets_received);
3778 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3779 total_broadcast_packets_received);
3780 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3781 etherstatsoverrsizepkts);
3782 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3784 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3785 total_unicast_packets_received);
3786 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3787 total_multicast_packets_received);
3788 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3789 total_broadcast_packets_received);
3790 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3791 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3792 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3794 qstats->total_bytes_transmitted_hi =
3795 le32_to_cpu(xclient->total_sent_bytes.hi);
3796 qstats->total_bytes_transmitted_lo =
3797 le32_to_cpu(xclient->total_sent_bytes.lo);
3799 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3800 total_unicast_packets_transmitted);
3801 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3802 total_multicast_packets_transmitted);
3803 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3804 total_broadcast_packets_transmitted);
3806 old_tclient->checksum_discard = tclient->checksum_discard;
3807 old_tclient->ttl0_discard = tclient->ttl0_discard;
3809 ADD_64(fstats->total_bytes_received_hi,
3810 qstats->total_bytes_received_hi,
3811 fstats->total_bytes_received_lo,
3812 qstats->total_bytes_received_lo);
3813 ADD_64(fstats->total_bytes_transmitted_hi,
3814 qstats->total_bytes_transmitted_hi,
3815 fstats->total_bytes_transmitted_lo,
3816 qstats->total_bytes_transmitted_lo);
3817 ADD_64(fstats->total_unicast_packets_received_hi,
3818 qstats->total_unicast_packets_received_hi,
3819 fstats->total_unicast_packets_received_lo,
3820 qstats->total_unicast_packets_received_lo);
3821 ADD_64(fstats->total_multicast_packets_received_hi,
3822 qstats->total_multicast_packets_received_hi,
3823 fstats->total_multicast_packets_received_lo,
3824 qstats->total_multicast_packets_received_lo);
3825 ADD_64(fstats->total_broadcast_packets_received_hi,
3826 qstats->total_broadcast_packets_received_hi,
3827 fstats->total_broadcast_packets_received_lo,
3828 qstats->total_broadcast_packets_received_lo);
3829 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3830 qstats->total_unicast_packets_transmitted_hi,
3831 fstats->total_unicast_packets_transmitted_lo,
3832 qstats->total_unicast_packets_transmitted_lo);
3833 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3834 qstats->total_multicast_packets_transmitted_hi,
3835 fstats->total_multicast_packets_transmitted_lo,
3836 qstats->total_multicast_packets_transmitted_lo);
3837 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3838 qstats->total_broadcast_packets_transmitted_hi,
3839 fstats->total_broadcast_packets_transmitted_lo,
3840 qstats->total_broadcast_packets_transmitted_lo);
3841 ADD_64(fstats->valid_bytes_received_hi,
3842 qstats->valid_bytes_received_hi,
3843 fstats->valid_bytes_received_lo,
3844 qstats->valid_bytes_received_lo);
3846 ADD_64(estats->error_bytes_received_hi,
3847 qstats->error_bytes_received_hi,
3848 estats->error_bytes_received_lo,
3849 qstats->error_bytes_received_lo);
3850 ADD_64(estats->etherstatsoverrsizepkts_hi,
3851 qstats->etherstatsoverrsizepkts_hi,
3852 estats->etherstatsoverrsizepkts_lo,
3853 qstats->etherstatsoverrsizepkts_lo);
3854 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3855 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3858 ADD_64(fstats->total_bytes_received_hi,
3859 estats->rx_stat_ifhcinbadoctets_hi,
3860 fstats->total_bytes_received_lo,
3861 estats->rx_stat_ifhcinbadoctets_lo);
3863 memcpy(estats, &(fstats->total_bytes_received_hi),
3864 sizeof(struct host_func_stats) - 2*sizeof(u32));
3866 ADD_64(estats->etherstatsoverrsizepkts_hi,
3867 estats->rx_stat_dot3statsframestoolong_hi,
3868 estats->etherstatsoverrsizepkts_lo,
3869 estats->rx_stat_dot3statsframestoolong_lo);
3870 ADD_64(estats->error_bytes_received_hi,
3871 estats->rx_stat_ifhcinbadoctets_hi,
3872 estats->error_bytes_received_lo,
3873 estats->rx_stat_ifhcinbadoctets_lo);
3876 estats->mac_filter_discard =
3877 le32_to_cpu(tport->mac_filter_discard);
3878 estats->xxoverflow_discard =
3879 le32_to_cpu(tport->xxoverflow_discard);
3880 estats->brb_truncate_discard =
3881 le32_to_cpu(tport->brb_truncate_discard);
3882 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3885 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3887 bp->stats_pending = 0;
3892 static void bnx2x_net_stats_update(struct bnx2x *bp)
3894 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3895 struct net_device_stats *nstats = &bp->dev->stats;
3898 nstats->rx_packets =
3899 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3900 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3901 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3903 nstats->tx_packets =
3904 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3905 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3906 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3908 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3910 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3912 nstats->rx_dropped = estats->mac_discard;
3913 for_each_queue(bp, i)
3914 nstats->rx_dropped +=
3915 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3917 nstats->tx_dropped = 0;
3920 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3922 nstats->collisions =
3923 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3925 nstats->rx_length_errors =
3926 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3927 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3928 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3929 bnx2x_hilo(&estats->brb_truncate_hi);
3930 nstats->rx_crc_errors =
3931 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3932 nstats->rx_frame_errors =
3933 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3934 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3935 nstats->rx_missed_errors = estats->xxoverflow_discard;
3937 nstats->rx_errors = nstats->rx_length_errors +
3938 nstats->rx_over_errors +
3939 nstats->rx_crc_errors +
3940 nstats->rx_frame_errors +
3941 nstats->rx_fifo_errors +
3942 nstats->rx_missed_errors;
3944 nstats->tx_aborted_errors =
3945 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3946 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3947 nstats->tx_carrier_errors =
3948 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3949 nstats->tx_fifo_errors = 0;
3950 nstats->tx_heartbeat_errors = 0;
3951 nstats->tx_window_errors = 0;
3953 nstats->tx_errors = nstats->tx_aborted_errors +
3954 nstats->tx_carrier_errors +
3955 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3958 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3960 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3963 estats->driver_xoff = 0;
3964 estats->rx_err_discard_pkt = 0;
3965 estats->rx_skb_alloc_failed = 0;
3966 estats->hw_csum_err = 0;
3967 for_each_queue(bp, i) {
3968 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3970 estats->driver_xoff += qstats->driver_xoff;
3971 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3972 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3973 estats->hw_csum_err += qstats->hw_csum_err;
3977 static void bnx2x_stats_update(struct bnx2x *bp)
3979 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3981 if (*stats_comp != DMAE_COMP_VAL)
3985 bnx2x_hw_stats_update(bp);
3987 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3988 BNX2X_ERR("storm stats were not updated for 3 times\n");
3993 bnx2x_net_stats_update(bp);
3994 bnx2x_drv_stats_update(bp);
3996 if (bp->msglevel & NETIF_MSG_TIMER) {
3997 struct tstorm_per_client_stats *old_tclient =
3998 &bp->fp->old_tclient;
3999 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4000 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4001 struct net_device_stats *nstats = &bp->dev->stats;
4004 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4005 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4007 bnx2x_tx_avail(bp->fp),
4008 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4009 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4011 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4012 bp->fp->rx_comp_cons),
4013 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4014 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4015 "brb truncate %u\n",
4016 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4017 qstats->driver_xoff,
4018 estats->brb_drop_lo, estats->brb_truncate_lo);
4019 printk(KERN_DEBUG "tstats: checksum_discard %u "
4020 "packets_too_big_discard %lu no_buff_discard %lu "
4021 "mac_discard %u mac_filter_discard %u "
4022 "xxovrflow_discard %u brb_truncate_discard %u "
4023 "ttl0_discard %u\n",
4024 le32_to_cpu(old_tclient->checksum_discard),
4025 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4026 bnx2x_hilo(&qstats->no_buff_discard_hi),
4027 estats->mac_discard, estats->mac_filter_discard,
4028 estats->xxoverflow_discard, estats->brb_truncate_discard,
4029 le32_to_cpu(old_tclient->ttl0_discard));
4031 for_each_queue(bp, i) {
4032 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4033 bnx2x_fp(bp, i, tx_pkt),
4034 bnx2x_fp(bp, i, rx_pkt),
4035 bnx2x_fp(bp, i, rx_calls));
4039 bnx2x_hw_stats_post(bp);
4040 bnx2x_storm_stats_post(bp);
4043 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4045 struct dmae_command *dmae;
4047 int loader_idx = PMF_DMAE_C(bp);
4048 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4050 bp->executer_idx = 0;
4052 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4054 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4056 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4058 DMAE_CMD_ENDIANITY_DW_SWAP |
4060 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4061 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4063 if (bp->port.port_stx) {
4065 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4067 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4069 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4070 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4071 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4072 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4073 dmae->dst_addr_hi = 0;
4074 dmae->len = sizeof(struct host_port_stats) >> 2;
4076 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4077 dmae->comp_addr_hi = 0;
4080 dmae->comp_addr_lo =
4081 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4082 dmae->comp_addr_hi =
4083 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4084 dmae->comp_val = DMAE_COMP_VAL;
4092 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4093 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4094 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4095 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4096 dmae->dst_addr_lo = bp->func_stx >> 2;
4097 dmae->dst_addr_hi = 0;
4098 dmae->len = sizeof(struct host_func_stats) >> 2;
4099 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4100 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4101 dmae->comp_val = DMAE_COMP_VAL;
4107 static void bnx2x_stats_stop(struct bnx2x *bp)
4111 bnx2x_stats_comp(bp);
4114 update = (bnx2x_hw_stats_update(bp) == 0);
4116 update |= (bnx2x_storm_stats_update(bp) == 0);
4119 bnx2x_net_stats_update(bp);
4122 bnx2x_port_stats_stop(bp);
4124 bnx2x_hw_stats_post(bp);
4125 bnx2x_stats_comp(bp);
4129 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4133 static const struct {
4134 void (*action)(struct bnx2x *bp);
4135 enum bnx2x_stats_state next_state;
4136 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4139 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4140 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4141 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4142 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4145 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4146 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4147 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4148 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4152 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4154 enum bnx2x_stats_state state = bp->stats_state;
4156 bnx2x_stats_stm[state][event].action(bp);
4157 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4159 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4160 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4161 state, event, bp->stats_state);
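/* bnx2x_stats_stm[][] above is a small state machine with two states
(DISABLED, ENABLED) and four events (PMF, LINK_UP, UPDATE, STOP).
bnx2x_stats_handle() runs the action for the current state/event pair
and then moves to next_state; for example LINK_UP while DISABLED runs
bnx2x_stats_start() and enables statistics, while STOP in ENABLED runs
bnx2x_stats_stop() and disables them. */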
4164 static void bnx2x_timer(unsigned long data)
4166 struct bnx2x *bp = (struct bnx2x *) data;
4168 if (!netif_running(bp->dev))
4171 if (atomic_read(&bp->intr_sem) != 0)
4175 struct bnx2x_fastpath *fp = &bp->fp[0];
4179 rc = bnx2x_rx_int(fp, 1000);
4182 if (!BP_NOMCP(bp)) {
4183 int func = BP_FUNC(bp);
4187 ++bp->fw_drv_pulse_wr_seq;
4188 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4189 /* TBD - add SYSTEM_TIME */
4190 drv_pulse = bp->fw_drv_pulse_wr_seq;
4191 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4193 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4194 MCP_PULSE_SEQ_MASK);
4195 /* The delta between driver pulse and mcp response
4196 * should be 1 (before mcp response) or 0 (after mcp response) */
4198 if ((drv_pulse != mcp_pulse) &&
4199 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4200 /* someone lost a heartbeat... */
4201 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4202 drv_pulse, mcp_pulse);
4206 if ((bp->state == BNX2X_STATE_OPEN) ||
4207 (bp->state == BNX2X_STATE_DISABLED))
4208 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4211 mod_timer(&bp->timer, jiffies + bp->current_interval);
4214 /* end of Statistics */
4219 /* nic init service functions */
4222 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4224 int port = BP_PORT(bp);
4226 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4227 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4228 sizeof(struct ustorm_status_block)/4);
4229 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4230 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4231 sizeof(struct cstorm_status_block)/4);
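/* Program the USTORM/CSTORM host addresses of a fastpath status block,
 * record the owning function, start with host coalescing disabled for
 * every index and finally enable the IGU for this SB.
 */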
4234 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4235 dma_addr_t mapping, int sb_id)
4237 int port = BP_PORT(bp);
4238 int func = BP_FUNC(bp);
4243 section = ((u64)mapping) + offsetof(struct host_status_block,
4245 sb->u_status_block.status_block_id = sb_id;
4247 REG_WR(bp, BAR_USTRORM_INTMEM +
4248 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4249 REG_WR(bp, BAR_USTRORM_INTMEM +
4250 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4252 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4253 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4255 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4256 REG_WR16(bp, BAR_USTRORM_INTMEM +
4257 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4260 section = ((u64)mapping) + offsetof(struct host_status_block,
4262 sb->c_status_block.status_block_id = sb_id;
4264 REG_WR(bp, BAR_CSTRORM_INTMEM +
4265 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4266 REG_WR(bp, BAR_CSTRORM_INTMEM +
4267 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4269 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4270 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4272 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4273 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4274 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4276 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4279 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4281 int func = BP_FUNC(bp);
4283 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4284 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4285 sizeof(struct tstorm_def_status_block)/4);
4286 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4287 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4288 sizeof(struct ustorm_def_status_block)/4);
4289 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4290 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4291 sizeof(struct cstorm_def_status_block)/4);
4292 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4293 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4294 sizeof(struct xstorm_def_status_block)/4);
4297 static void bnx2x_init_def_sb(struct bnx2x *bp,
4298 struct host_def_status_block *def_sb,
4299 dma_addr_t mapping, int sb_id)
4301 int port = BP_PORT(bp);
4302 int func = BP_FUNC(bp);
4303 int index, val, reg_offset;
4307 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4308 atten_status_block);
4309 def_sb->atten_status_block.status_block_id = sb_id;
4313 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4314 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4316 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4317 bp->attn_group[index].sig[0] = REG_RD(bp,
4318 reg_offset + 0x10*index);
4319 bp->attn_group[index].sig[1] = REG_RD(bp,
4320 reg_offset + 0x4 + 0x10*index);
4321 bp->attn_group[index].sig[2] = REG_RD(bp,
4322 reg_offset + 0x8 + 0x10*index);
4323 bp->attn_group[index].sig[3] = REG_RD(bp,
4324 reg_offset + 0xc + 0x10*index);
4327 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4328 HC_REG_ATTN_MSG0_ADDR_L);
4330 REG_WR(bp, reg_offset, U64_LO(section));
4331 REG_WR(bp, reg_offset + 4, U64_HI(section));
4333 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4335 val = REG_RD(bp, reg_offset);
4337 REG_WR(bp, reg_offset, val);
4340 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4341 u_def_status_block);
4342 def_sb->u_def_status_block.status_block_id = sb_id;
4344 REG_WR(bp, BAR_USTRORM_INTMEM +
4345 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4346 REG_WR(bp, BAR_USTRORM_INTMEM +
4347 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4349 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4350 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4352 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4353 REG_WR16(bp, BAR_USTRORM_INTMEM +
4354 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4357 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4358 c_def_status_block);
4359 def_sb->c_def_status_block.status_block_id = sb_id;
4361 REG_WR(bp, BAR_CSTRORM_INTMEM +
4362 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4363 REG_WR(bp, BAR_CSTRORM_INTMEM +
4364 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4366 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4367 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4369 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4370 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4371 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4374 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4375 t_def_status_block);
4376 def_sb->t_def_status_block.status_block_id = sb_id;
4378 REG_WR(bp, BAR_TSTRORM_INTMEM +
4379 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4380 REG_WR(bp, BAR_TSTRORM_INTMEM +
4381 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4383 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4384 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4386 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4387 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4388 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4391 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4392 x_def_status_block);
4393 def_sb->x_def_status_block.status_block_id = sb_id;
4395 REG_WR(bp, BAR_XSTRORM_INTMEM +
4396 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4397 REG_WR(bp, BAR_XSTRORM_INTMEM +
4398 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4400 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4401 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4403 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4404 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4405 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4407 bp->stats_pending = 0;
4408 bp->set_mac_pending = 0;
4410 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
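/* Program the host coalescing timeouts for the Rx and Tx CQ indices of
 * every queue from bp->rx_ticks/bp->tx_ticks, and disable coalescing on
 * an index whenever its tick value is zero.
 */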
4413 static void bnx2x_update_coalesce(struct bnx2x *bp)
4415 int port = BP_PORT(bp);
4418 for_each_queue(bp, i) {
4419 int sb_id = bp->fp[i].sb_id;
4421 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4422 REG_WR8(bp, BAR_USTRORM_INTMEM +
4423 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4424 U_SB_ETH_RX_CQ_INDEX),
4426 REG_WR16(bp, BAR_USTRORM_INTMEM +
4427 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4428 U_SB_ETH_RX_CQ_INDEX),
4429 bp->rx_ticks ? 0 : 1);
4431 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4432 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4433 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4434 C_SB_ETH_TX_CQ_INDEX),
4436 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4437 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4438 C_SB_ETH_TX_CQ_INDEX),
4439 bp->tx_ticks ? 0 : 1);
4443 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4444 struct bnx2x_fastpath *fp, int last)
4448 for (i = 0; i < last; i++) {
4449 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4450 struct sk_buff *skb = rx_buf->skb;
4453 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4457 if (fp->tpa_state[i] == BNX2X_TPA_START)
4458 pci_unmap_single(bp->pdev,
4459 pci_unmap_addr(rx_buf, mapping),
4460 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
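/* Set up the Rx rings: size the Rx buffers from the MTU, pre-allocate the
 * TPA skb pool (falling back to non-TPA on failure), chain the "next page"
 * elements of the SGE/BD/CQE rings, fill them with buffers and publish the
 * initial producers to the chip.
 */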
4467 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4469 int func = BP_FUNC(bp);
4470 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4471 ETH_MAX_AGGREGATION_QUEUES_E1H;
4472 u16 ring_prod, cqe_ring_prod;
4475 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4477 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4479 if (bp->flags & TPA_ENABLE_FLAG) {
4481 for_each_rx_queue(bp, j) {
4482 struct bnx2x_fastpath *fp = &bp->fp[j];
4484 for (i = 0; i < max_agg_queues; i++) {
4485 fp->tpa_pool[i].skb =
4486 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4487 if (!fp->tpa_pool[i].skb) {
4488 BNX2X_ERR("Failed to allocate TPA "
4489 "skb pool for queue[%d] - "
4490 "disabling TPA on this "
4492 bnx2x_free_tpa_pool(bp, fp, i);
4493 fp->disable_tpa = 1;
4496 pci_unmap_addr_set((struct sw_rx_bd *)
4497 &bp->fp->tpa_pool[i],
4499 fp->tpa_state[i] = BNX2X_TPA_STOP;
4504 for_each_rx_queue(bp, j) {
4505 struct bnx2x_fastpath *fp = &bp->fp[j];
4508 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4509 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4511 /* "next page" elements initialization */
4513 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4514 struct eth_rx_sge *sge;
4516 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4518 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4519 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4522 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4525 bnx2x_init_sge_ring_bit_mask(fp);
4528 for (i = 1; i <= NUM_RX_RINGS; i++) {
4529 struct eth_rx_bd *rx_bd;
4531 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4533 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4534 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4536 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4537 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4541 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4542 struct eth_rx_cqe_next_page *nextpg;
4544 nextpg = (struct eth_rx_cqe_next_page *)
4545 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4547 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4548 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4550 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4551 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4554 /* Allocate SGEs and initialize the ring elements */
4555 for (i = 0, ring_prod = 0;
4556 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4558 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4559 BNX2X_ERR("was only able to allocate "
4561 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4562 /* Cleanup already allocated elements */
4563 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4564 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4565 fp->disable_tpa = 1;
4569 ring_prod = NEXT_SGE_IDX(ring_prod);
4571 fp->rx_sge_prod = ring_prod;
4573 /* Allocate BDs and initialize BD ring */
4574 fp->rx_comp_cons = 0;
4575 cqe_ring_prod = ring_prod = 0;
4576 for (i = 0; i < bp->rx_ring_size; i++) {
4577 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4578 BNX2X_ERR("was only able to allocate "
4579 "%d rx skbs on queue[%d]\n", i, j);
4580 fp->eth_q_stats.rx_skb_alloc_failed++;
4583 ring_prod = NEXT_RX_IDX(ring_prod);
4584 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4585 WARN_ON(ring_prod <= i);
4588 fp->rx_bd_prod = ring_prod;
4589 /* must not have more available CQEs than BDs */
4590 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4592 fp->rx_pkt = fp->rx_calls = 0;
4595 /* this will generate an interrupt (to the TSTORM)
4596 * and must only be done after the chip is initialized */
4598 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4603 REG_WR(bp, BAR_USTRORM_INTMEM +
4604 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4605 U64_LO(fp->rx_comp_mapping));
4606 REG_WR(bp, BAR_USTRORM_INTMEM +
4607 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4608 U64_HI(fp->rx_comp_mapping));
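/* Set up the Tx rings: chain the "next page" BDs of every ring and reset
 * the software producer/consumer indices.
 */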
4612 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4616 for_each_tx_queue(bp, j) {
4617 struct bnx2x_fastpath *fp = &bp->fp[j];
4619 for (i = 1; i <= NUM_TX_RINGS; i++) {
4620 struct eth_tx_bd *tx_bd =
4621 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4624 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4625 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4627 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4628 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4631 fp->tx_pkt_prod = 0;
4632 fp->tx_pkt_cons = 0;
4635 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4640 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4642 int func = BP_FUNC(bp);
4644 spin_lock_init(&bp->spq_lock);
4646 bp->spq_left = MAX_SPQ_PENDING;
4647 bp->spq_prod_idx = 0;
4648 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4649 bp->spq_prod_bd = bp->spq;
4650 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4652 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4653 U64_LO(bp->spq_mapping));
4655 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4656 U64_HI(bp->spq_mapping));
4658 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
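/* Fill the per-queue Ethernet context: USTORM Rx side (client id, status
 * block, BD/SGE page bases, optional TPA flags) and XSTORM Tx side (BD page
 * base, doorbell data address, statistics id).
 */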
4662 static void bnx2x_init_context(struct bnx2x *bp)
4666 for_each_queue(bp, i) {
4667 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4668 struct bnx2x_fastpath *fp = &bp->fp[i];
4669 u8 cl_id = fp->cl_id;
4670 u8 sb_id = fp->sb_id;
4672 context->ustorm_st_context.common.sb_index_numbers =
4673 BNX2X_RX_SB_INDEX_NUM;
4674 context->ustorm_st_context.common.clientId = cl_id;
4675 context->ustorm_st_context.common.status_block_id = sb_id;
4676 context->ustorm_st_context.common.flags =
4677 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4678 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4679 context->ustorm_st_context.common.statistics_counter_id =
4681 context->ustorm_st_context.common.mc_alignment_log_size =
4682 BNX2X_RX_ALIGN_SHIFT;
4683 context->ustorm_st_context.common.bd_buff_size =
4685 context->ustorm_st_context.common.bd_page_base_hi =
4686 U64_HI(fp->rx_desc_mapping);
4687 context->ustorm_st_context.common.bd_page_base_lo =
4688 U64_LO(fp->rx_desc_mapping);
4689 if (!fp->disable_tpa) {
4690 context->ustorm_st_context.common.flags |=
4691 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4692 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4693 context->ustorm_st_context.common.sge_buff_size =
4694 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4696 context->ustorm_st_context.common.sge_page_base_hi =
4697 U64_HI(fp->rx_sge_mapping);
4698 context->ustorm_st_context.common.sge_page_base_lo =
4699 U64_LO(fp->rx_sge_mapping);
4702 context->ustorm_ag_context.cdu_usage =
4703 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4704 CDU_REGION_NUMBER_UCM_AG,
4705 ETH_CONNECTION_TYPE);
4707 context->xstorm_st_context.tx_bd_page_base_hi =
4708 U64_HI(fp->tx_desc_mapping);
4709 context->xstorm_st_context.tx_bd_page_base_lo =
4710 U64_LO(fp->tx_desc_mapping);
4711 context->xstorm_st_context.db_data_addr_hi =
4712 U64_HI(fp->tx_prods_mapping);
4713 context->xstorm_st_context.db_data_addr_lo =
4714 U64_LO(fp->tx_prods_mapping);
4715 context->xstorm_st_context.statistics_data = (cl_id |
4716 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4717 context->cstorm_st_context.sb_index_number =
4718 C_SB_ETH_TX_CQ_INDEX;
4719 context->cstorm_st_context.status_block_id = sb_id;
4721 context->xstorm_ag_context.cdu_reserved =
4722 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4723 CDU_REGION_NUMBER_XCM_AG,
4724 ETH_CONNECTION_TYPE);
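/* Fill the TSTORM RSS indirection table, spreading the entries over the Rx
 * queues (client ids); skipped entirely when multi_mode is disabled.
 */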
4728 static void bnx2x_init_ind_table(struct bnx2x *bp)
4730 int func = BP_FUNC(bp);
4733 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4737 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4738 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4739 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4740 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4741 bp->fp->cl_id + (i % bp->num_rx_queues));
4744 static void bnx2x_set_client_config(struct bnx2x *bp)
4746 struct tstorm_eth_client_config tstorm_client = {0};
4747 int port = BP_PORT(bp);
4750 tstorm_client.mtu = bp->dev->mtu;
4751 tstorm_client.config_flags =
4752 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4753 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4755 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4756 tstorm_client.config_flags |=
4757 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4758 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4762 if (bp->flags & TPA_ENABLE_FLAG) {
4763 tstorm_client.max_sges_for_packet =
4764 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4765 tstorm_client.max_sges_for_packet =
4766 ((tstorm_client.max_sges_for_packet +
4767 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4768 PAGES_PER_SGE_SHIFT;
4770 tstorm_client.config_flags |=
4771 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4774 for_each_queue(bp, i) {
4775 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4777 REG_WR(bp, BAR_TSTRORM_INTMEM +
4778 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4779 ((u32 *)&tstorm_client)[0]);
4780 REG_WR(bp, BAR_TSTRORM_INTMEM +
4781 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4782 ((u32 *)&tstorm_client)[1]);
4785 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4786 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
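/* Translate bp->rx_mode into the per-function TSTORM MAC filter masks
 * (drop-all/accept-all for unicast, multicast and broadcast) and write them
 * to internal memory; the client config is re-applied unless Rx is disabled.
 */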
4789 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4791 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4792 int mode = bp->rx_mode;
4793 int mask = (1 << BP_L_ID(bp));
4794 int func = BP_FUNC(bp);
4797 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4800 case BNX2X_RX_MODE_NONE: /* no Rx */
4801 tstorm_mac_filter.ucast_drop_all = mask;
4802 tstorm_mac_filter.mcast_drop_all = mask;
4803 tstorm_mac_filter.bcast_drop_all = mask;
4806 case BNX2X_RX_MODE_NORMAL:
4807 tstorm_mac_filter.bcast_accept_all = mask;
4810 case BNX2X_RX_MODE_ALLMULTI:
4811 tstorm_mac_filter.mcast_accept_all = mask;
4812 tstorm_mac_filter.bcast_accept_all = mask;
4815 case BNX2X_RX_MODE_PROMISC:
4816 tstorm_mac_filter.ucast_accept_all = mask;
4817 tstorm_mac_filter.mcast_accept_all = mask;
4818 tstorm_mac_filter.bcast_accept_all = mask;
4822 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4826 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4827 REG_WR(bp, BAR_TSTRORM_INTMEM +
4828 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4829 ((u32 *)&tstorm_mac_filter)[i]);
4831 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4832 ((u32 *)&tstorm_mac_filter)[i]); */
4835 if (mode != BNX2X_RX_MODE_NONE)
4836 bnx2x_set_client_config(bp);
4839 static void bnx2x_init_internal_common(struct bnx2x *bp)
4843 if (bp->flags & TPA_ENABLE_FLAG) {
4844 struct tstorm_eth_tpa_exist tpa = {0};
4848 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4850 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4854 /* Zero this manually as its initialization is
4855 currently missing in the initTool */
4856 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4857 REG_WR(bp, BAR_USTRORM_INTMEM +
4858 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4861 static void bnx2x_init_internal_port(struct bnx2x *bp)
4863 int port = BP_PORT(bp);
4865 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4866 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4871 /* Calculates the sum of vn_min_rates.
4872 It's needed for further normalizing of the min_rates.
4874 Returns the sum of vn_min_rates, or
4876 0 - if all the min_rates are 0.
4877 In the latter case the fairness algorithm should be deactivated.
4878 If not all min_rates are zero then those that are zeroes will be set to 1. */
4880 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4883 int port = BP_PORT(bp);
4886 bp->vn_weight_sum = 0;
4887 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4888 int func = 2*vn + port;
4890 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4891 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4892 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4894 /* Skip hidden vns */
4895 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4898 /* If min rate is zero - set it to 1 */
4900 vn_min_rate = DEF_MIN_RATE;
4904 bp->vn_weight_sum += vn_min_rate;
4907 /* ... only if all min rates are zeros - disable fairness */
4909 bp->vn_weight_sum = 0;
4912 static void bnx2x_init_internal_func(struct bnx2x *bp)
4914 struct tstorm_eth_function_common_config tstorm_config = {0};
4915 struct stats_indication_flags stats_flags = {0};
4916 int port = BP_PORT(bp);
4917 int func = BP_FUNC(bp);
4923 tstorm_config.config_flags = MULTI_FLAGS(bp);
4924 tstorm_config.rss_result_mask = MULTI_MASK;
4927 tstorm_config.config_flags |=
4928 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4930 tstorm_config.leading_client_id = BP_L_ID(bp);
4932 REG_WR(bp, BAR_TSTRORM_INTMEM +
4933 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4934 (*(u32 *)&tstorm_config));
4936 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4937 bnx2x_set_storm_rx_mode(bp);
4939 for_each_queue(bp, i) {
4940 u8 cl_id = bp->fp[i].cl_id;
4942 /* reset xstorm per client statistics */
4943 offset = BAR_XSTRORM_INTMEM +
4944 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4947 REG_WR(bp, offset + j*4, 0);
4949 /* reset tstorm per client statistics */
4950 offset = BAR_TSTRORM_INTMEM +
4951 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4954 REG_WR(bp, offset + j*4, 0);
4956 /* reset ustorm per client statistics */
4957 offset = BAR_USTRORM_INTMEM +
4958 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4961 REG_WR(bp, offset + j*4, 0);
4964 /* Init statistics related context */
4965 stats_flags.collect_eth = 1;
4967 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4968 ((u32 *)&stats_flags)[0]);
4969 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4970 ((u32 *)&stats_flags)[1]);
4972 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4973 ((u32 *)&stats_flags)[0]);
4974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4975 ((u32 *)&stats_flags)[1]);
4977 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4978 ((u32 *)&stats_flags)[0]);
4979 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4980 ((u32 *)&stats_flags)[1]);
4982 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4983 ((u32 *)&stats_flags)[0]);
4984 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4985 ((u32 *)&stats_flags)[1]);
4987 REG_WR(bp, BAR_XSTRORM_INTMEM +
4988 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4989 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4990 REG_WR(bp, BAR_XSTRORM_INTMEM +
4991 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4992 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4994 REG_WR(bp, BAR_TSTRORM_INTMEM +
4995 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4996 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4997 REG_WR(bp, BAR_TSTRORM_INTMEM +
4998 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4999 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001 REG_WR(bp, BAR_USTRORM_INTMEM +
5002 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5003 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5004 REG_WR(bp, BAR_USTRORM_INTMEM +
5005 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5006 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008 if (CHIP_IS_E1H(bp)) {
5009 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5011 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5013 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5015 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5018 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5022 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5024 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5025 SGE_PAGE_SIZE * PAGES_PER_SGE),
5027 for_each_rx_queue(bp, i) {
5028 struct bnx2x_fastpath *fp = &bp->fp[i];
5030 REG_WR(bp, BAR_USTRORM_INTMEM +
5031 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5032 U64_LO(fp->rx_comp_mapping));
5033 REG_WR(bp, BAR_USTRORM_INTMEM +
5034 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5035 U64_HI(fp->rx_comp_mapping));
5037 REG_WR16(bp, BAR_USTRORM_INTMEM +
5038 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5042 /* dropless flow control */
5043 if (CHIP_IS_E1H(bp)) {
5044 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5046 rx_pause.bd_thr_low = 250;
5047 rx_pause.cqe_thr_low = 250;
5049 rx_pause.sge_thr_low = 0;
5050 rx_pause.bd_thr_high = 350;
5051 rx_pause.cqe_thr_high = 350;
5052 rx_pause.sge_thr_high = 0;
5054 for_each_rx_queue(bp, i) {
5055 struct bnx2x_fastpath *fp = &bp->fp[i];
5057 if (!fp->disable_tpa) {
5058 rx_pause.sge_thr_low = 150;
5059 rx_pause.sge_thr_high = 250;
5063 offset = BAR_USTRORM_INTMEM +
5064 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5067 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5069 REG_WR(bp, offset + j*4,
5070 ((u32 *)&rx_pause)[j]);
5074 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5076 /* Init rate shaping and fairness contexts */
5080 /* During init there is no active link;
5081 until link is up, set the link rate to 10Gbps */
5082 bp->link_vars.line_speed = SPEED_10000;
5083 bnx2x_init_port_minmax(bp);
5085 bnx2x_calc_vn_weight_sum(bp);
5087 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5088 bnx2x_init_vn_minmax(bp, 2*vn + port);
5090 /* Enable rate shaping and fairness */
5091 bp->cmng.flags.cmng_enables =
5092 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5093 if (bp->vn_weight_sum)
5094 bp->cmng.flags.cmng_enables |=
5095 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5097 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5098 " fairness will be disabled\n");
5100 /* rate shaping and fairness are disabled */
5102 "single function mode minmax will be disabled\n");
5106 /* Store it to internal memory */
5108 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5109 REG_WR(bp, BAR_XSTRORM_INTMEM +
5110 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5111 ((u32 *)(&bp->cmng))[i]);
5114 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5116 switch (load_code) {
5117 case FW_MSG_CODE_DRV_LOAD_COMMON:
5118 bnx2x_init_internal_common(bp);
5121 case FW_MSG_CODE_DRV_LOAD_PORT:
5122 bnx2x_init_internal_port(bp);
5125 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5126 bnx2x_init_internal_func(bp);
5130 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5135 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5139 for_each_queue(bp, i) {
5140 struct bnx2x_fastpath *fp = &bp->fp[i];
5143 fp->state = BNX2X_FP_STATE_CLOSED;
5145 fp->cl_id = BP_L_ID(bp) + i;
5146 fp->sb_id = fp->cl_id;
5148 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5149 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5150 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5152 bnx2x_update_fpsb_idx(fp);
5155 /* ensure status block indices were read */
5159 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5161 bnx2x_update_dsb_idx(bp);
5162 bnx2x_update_coalesce(bp);
5163 bnx2x_init_rx_rings(bp);
5164 bnx2x_init_tx_ring(bp);
5165 bnx2x_init_sp_ring(bp);
5166 bnx2x_init_context(bp);
5167 bnx2x_init_internal(bp, load_code);
5168 bnx2x_init_ind_table(bp);
5169 bnx2x_stats_init(bp);
5171 /* At this point, we are ready for interrupts */
5172 atomic_set(&bp->intr_sem, 0);
5174 /* flush all before enabling interrupts */
5178 bnx2x_int_enable(bp);
5181 /* end of nic init */
5184 /* gzip service functions */
5187 static int bnx2x_gunzip_init(struct bnx2x *bp)
5189 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5190 &bp->gunzip_mapping);
5191 if (bp->gunzip_buf == NULL)
5194 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5195 if (bp->strm == NULL)
5198 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5200 if (bp->strm->workspace == NULL)
5210 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5211 bp->gunzip_mapping);
5212 bp->gunzip_buf = NULL;
5215 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5216 " un-compression\n", bp->dev->name);
5220 static void bnx2x_gunzip_end(struct bnx2x *bp)
5222 kfree(bp->strm->workspace);
5227 if (bp->gunzip_buf) {
5228 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5229 bp->gunzip_mapping);
5230 bp->gunzip_buf = NULL;
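/* Decompress a gzip image into bp->gunzip_buf: validate the gzip header,
 * skip the optional FNAME field, inflate the raw deflate stream with zlib
 * and leave the output length (in dwords) in bp->gunzip_outlen.
 */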
5234 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5238 /* check gzip header */
5239 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5246 if (zbuf[3] & FNAME)
5247 while ((zbuf[n++] != 0) && (n < len));
5249 bp->strm->next_in = zbuf + n;
5250 bp->strm->avail_in = len - n;
5251 bp->strm->next_out = bp->gunzip_buf;
5252 bp->strm->avail_out = FW_BUF_SIZE;
5254 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5258 rc = zlib_inflate(bp->strm, Z_FINISH);
5259 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5260 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5261 bp->dev->name, bp->strm->msg);
5263 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5264 if (bp->gunzip_outlen & 0x3)
5265 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5266 " gunzip_outlen (%d) not aligned\n",
5267 bp->dev->name, bp->gunzip_outlen);
5268 bp->gunzip_outlen >>= 2;
5270 zlib_inflateEnd(bp->strm);
5272 if (rc == Z_STREAM_END)
5278 /* nic load/unload */
5281 /* General service functions */
5284 /* send a NIG loopback debug packet */
5285 static void bnx2x_lb_pckt(struct bnx2x *bp)
5289 /* Ethernet source and destination addresses */
5290 wb_write[0] = 0x55555555;
5291 wb_write[1] = 0x55555555;
5292 wb_write[2] = 0x20; /* SOP */
5293 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5295 /* NON-IP protocol */
5296 wb_write[0] = 0x09000000;
5297 wb_write[1] = 0x55555555;
5298 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5299 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5302 /* some of the internal memories
5303 * are not directly readable from the driver;
5304 * to test them we send debug packets */
5306 static int bnx2x_int_mem_test(struct bnx2x *bp)
5312 if (CHIP_REV_IS_FPGA(bp))
5314 else if (CHIP_REV_IS_EMUL(bp))
5319 DP(NETIF_MSG_HW, "start part1\n");
5321 /* Disable inputs of parser neighbor blocks */
5322 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5323 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5324 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5325 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5327 /* Write 0 to parser credits for CFC search request */
5328 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5330 /* send Ethernet packet */
5333 /* TODO: do I reset the NIG statistic? */
5334 /* Wait until NIG register shows 1 packet of size 0x10 */
5335 count = 1000 * factor;
5338 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5339 val = *bnx2x_sp(bp, wb_data[0]);
5347 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5351 /* Wait until PRS register shows 1 packet */
5352 count = 1000 * factor;
5354 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5362 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5366 /* Reset and init BRB, PRS */
5367 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5369 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5371 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5372 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5374 DP(NETIF_MSG_HW, "part2\n");
5376 /* Disable inputs of parser neighbor blocks */
5377 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5378 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5379 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5380 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5382 /* Write 0 to parser credits for CFC search request */
5383 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5385 /* send 10 Ethernet packets */
5386 for (i = 0; i < 10; i++)
5389 /* Wait until NIG register shows 10 + 1
5390 packets of size 11*0x10 = 0xb0 */
5391 count = 1000 * factor;
5394 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5395 val = *bnx2x_sp(bp, wb_data[0]);
5403 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5407 /* Wait until PRS register shows 2 packets */
5408 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5410 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5412 /* Write 1 to parser credits for CFC search request */
5413 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5415 /* Wait until PRS register shows 3 packets */
5416 msleep(10 * factor);
5417 /* Wait until NIG register shows 1 packet of size 0x10 */
5418 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5420 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5422 /* clear NIG EOP FIFO */
5423 for (i = 0; i < 11; i++)
5424 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5425 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5427 BNX2X_ERR("clear of NIG failed\n");
5431 /* Reset and init BRB, PRS, NIG */
5432 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5434 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5436 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5437 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5440 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5443 /* Enable inputs of parser neighbor blocks */
5444 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5445 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5446 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5447 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5449 DP(NETIF_MSG_HW, "done\n");
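/* Unmask the attention interrupts of most HW blocks (writing 0 unmasks)
 * so their errors reach the AEU; PBF keeps bits 3-4 masked and the PXP2
 * mask depends on the chip revision.
 */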
5454 static void enable_blocks_attention(struct bnx2x *bp)
5456 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5457 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5458 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5459 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5460 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5461 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5462 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5463 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5464 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5465 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5466 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5467 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5468 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5469 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5470 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5471 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5472 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5473 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5474 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5475 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5476 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5477 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5478 if (CHIP_REV_IS_FPGA(bp))
5479 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5481 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5482 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5483 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5484 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5485 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5486 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5487 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5488 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5489 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5490 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5494 static void bnx2x_reset_common(struct bnx2x *bp)
5497 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5499 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5502 static int bnx2x_init_common(struct bnx2x *bp)
5506 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5508 bnx2x_reset_common(bp);
5509 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5510 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5512 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5513 if (CHIP_IS_E1H(bp))
5514 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5516 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5518 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5520 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5521 if (CHIP_IS_E1(bp)) {
5522 /* enable HW interrupt from PXP on USDM overflow
5523 bit 16 on INT_MASK_0 */
5524 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5527 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5531 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5532 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5533 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5534 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5535 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5536 /* make sure this value is 0 */
5537 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5539 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5540 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5541 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5542 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5543 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5546 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5548 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5549 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5550 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5553 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5554 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5556 /* let the HW do its magic ... */
5558 /* finish PXP init */
5559 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5561 BNX2X_ERR("PXP2 CFG failed\n");
5564 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5566 BNX2X_ERR("PXP2 RD_INIT failed\n");
5570 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5571 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5573 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5575 /* clean the DMAE memory */
5577 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5579 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5580 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5581 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5582 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5584 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5585 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5586 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5587 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5589 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5590 /* soft reset pulse */
5591 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5592 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5595 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5598 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5599 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5600 if (!CHIP_REV_IS_SLOW(bp)) {
5601 /* enable hw interrupt from doorbell Q */
5602 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5605 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5606 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5607 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5609 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5610 if (CHIP_IS_E1H(bp))
5611 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5613 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5614 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5615 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5616 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5618 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5619 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5620 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5621 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5623 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5624 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5625 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5626 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5629 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5631 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5634 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5635 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5636 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5638 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5639 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5640 REG_WR(bp, i, 0xc0cac01a);
5641 /* TODO: replace with something meaningful */
5643 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5644 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5646 if (sizeof(union cdu_context) != 1024)
5647 /* we currently assume that a context is 1024 bytes */
5648 printk(KERN_ALERT PFX "please adjust the size of"
5649 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5651 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5652 val = (4 << 24) + (0 << 12) + 1024;
5653 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5654 if (CHIP_IS_E1(bp)) {
5655 /* !!! fix pxp client credit until excel update */
5656 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5657 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5660 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5661 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5662 /* enable context validation interrupt from CFC */
5663 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5665 /* set the thresholds to prevent CFC/CDU race */
5666 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5668 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5669 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5671 /* PXPCS COMMON comes here */
5672 /* Reset PCIE errors for debug */
5673 REG_WR(bp, 0x2814, 0xffffffff);
5674 REG_WR(bp, 0x3820, 0xffffffff);
5676 /* EMAC0 COMMON comes here */
5677 /* EMAC1 COMMON comes here */
5678 /* DBU COMMON comes here */
5679 /* DBG COMMON comes here */
5681 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5682 if (CHIP_IS_E1H(bp)) {
5683 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5684 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5687 if (CHIP_REV_IS_SLOW(bp))
5690 /* finish CFC init */
5691 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5693 BNX2X_ERR("CFC LL_INIT failed\n");
5696 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5698 BNX2X_ERR("CFC AC_INIT failed\n");
5701 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5703 BNX2X_ERR("CFC CAM_INIT failed\n");
5706 REG_WR(bp, CFC_REG_DEBUG0, 0);
5708 /* read NIG statistic
5709 to see if this is our first up since powerup */
5710 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5711 val = *bnx2x_sp(bp, wb_data[0]);
5713 /* do internal memory self test */
5714 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5715 BNX2X_ERR("internal mem self test failed\n");
5719 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5723 bp->port.need_hw_lock = 1;
5726 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5727 /* Fan failure is indicated by SPIO 5 */
5728 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5729 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5731 /* set to active low mode */
5732 val = REG_RD(bp, MISC_REG_SPIO_INT);
5733 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5734 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5735 REG_WR(bp, MISC_REG_SPIO_INT, val);
5737 /* enable interrupt to signal the IGU */
5738 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5739 val |= (1 << MISC_REGISTERS_SPIO_5);
5740 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5747 /* clear PXP2 attentions */
5748 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5750 enable_blocks_attention(bp);
5752 if (!BP_NOMCP(bp)) {
5753 bnx2x_acquire_phy_lock(bp);
5754 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5755 bnx2x_release_phy_lock(bp);
5757 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5762 static int bnx2x_init_port(struct bnx2x *bp)
5764 int port = BP_PORT(bp);
5768 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5770 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5772 /* Port PXP comes here */
5773 /* Port PXP2 comes here */
5778 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5779 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5780 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5781 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5786 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5787 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5788 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5789 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5794 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5795 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5796 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5797 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5799 /* Port CMs come here */
5800 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5801 (port ? XCM_PORT1_END : XCM_PORT0_END));
5803 /* Port QM comes here */
5805 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5806 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5808 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5809 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5811 /* Port DQ comes here */
5813 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5814 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5815 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5816 /* no pause for emulation and FPGA */
5821 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5822 else if (bp->dev->mtu > 4096) {
5823 if (bp->flags & ONE_PORT_FLAG)
5827 /* (24*1024 + val*4)/256 */
5828 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5831 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5832 high = low + 56; /* 14*1024/256 */
5834 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5835 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5838 /* Port PRS comes here */
5839 /* Port TSDM comes here */
5840 /* Port CSDM comes here */
5841 /* Port USDM comes here */
5842 /* Port XSDM comes here */
5844 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5845 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5846 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5847 port ? USEM_PORT1_END : USEM_PORT0_END);
5848 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5849 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5850 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5851 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5853 /* Port UPB comes here */
5854 /* Port XPB comes here */
5856 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5857 port ? PBF_PORT1_END : PBF_PORT0_END);
5859 /* configure PBF to work without PAUSE mtu 9000 */
5860 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5862 /* update threshold */
5863 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5864 /* update init credit */
5865 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5868 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5870 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5873 /* tell the searcher where the T2 table is */
5874 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5876 wb_write[0] = U64_LO(bp->t2_mapping);
5877 wb_write[1] = U64_HI(bp->t2_mapping);
5878 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5879 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5880 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5881 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5883 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5884 /* Port SRCH comes here */
5886 /* Port CDU comes here */
5887 /* Port CFC comes here */
5889 if (CHIP_IS_E1(bp)) {
5890 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5891 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5893 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5894 port ? HC_PORT1_END : HC_PORT0_END);
5896 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5897 MISC_AEU_PORT0_START,
5898 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5899 /* init aeu_mask_attn_func_0/1:
5900 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5901 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5902 * bits 4-7 are used for "per vn group attention" */
5903 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5904 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5906 /* Port PXPCS comes here */
5907 /* Port EMAC0 comes here */
5908 /* Port EMAC1 comes here */
5909 /* Port DBU comes here */
5910 /* Port DBG comes here */
5912 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5913 port ? NIG_PORT1_END : NIG_PORT0_END);
5915 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5917 if (CHIP_IS_E1H(bp)) {
5918 /* 0x2 disable e1hov, 0x1 enable */
5919 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5920 (IS_E1HMF(bp) ? 0x1 : 0x2));
5922 /* support pause requests from USDM, TSDM and BRB */
5923 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5926 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5927 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5928 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5932 /* Port MCP comes here */
5933 /* Port DMAE comes here */
5935 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5936 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5938 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5940 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5941 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5943 /* The GPIO should be swapped if the swap register is set and active */
5945 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5946 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5948 /* Select function upon port-swap configuration */
5950 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5951 aeu_gpio_mask = (swap_val && swap_override) ?
5952 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5955 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5956 aeu_gpio_mask = (swap_val && swap_override) ?
5957 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5958 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5960 val = REG_RD(bp, offset);
5961 /* add GPIO3 to group */
5962 val |= aeu_gpio_mask;
5963 REG_WR(bp, offset, val);
5967 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5968 /* add SPIO 5 to group 0 */
5969 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5970 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5971 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5978 bnx2x__link_reset(bp);
5983 #define ILT_PER_FUNC (768/2)
5984 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5985 /* the phys address is shifted right 12 bits and has a 1 (valid bit)
5986 added at the 53rd bit;
5987 then, since this is a wide register(TM),
5988 we split it into two 32 bit writes */
5990 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5991 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5992 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5993 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5995 #define CNIC_ILT_LINES 0
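/* Write one ILT entry; the on-chip address table register lives at a
 * different offset on E1H than on E1.
 */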
5997 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6001 if (CHIP_IS_E1H(bp))
6002 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6004 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6006 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6009 static int bnx2x_init_func(struct bnx2x *bp)
6011 int port = BP_PORT(bp);
6012 int func = BP_FUNC(bp);
6016 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6018 /* set MSI reconfigure capability */
6019 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6020 val = REG_RD(bp, addr);
6021 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6022 REG_WR(bp, addr, val);
6024 i = FUNC_ILT_BASE(func);
6026 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6027 if (CHIP_IS_E1H(bp)) {
6028 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6029 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6031 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6032 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6035 if (CHIP_IS_E1H(bp)) {
6036 for (i = 0; i < 9; i++)
6037 bnx2x_init_block(bp,
6038 cm_start[func][i], cm_end[func][i]);
6040 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6041 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6044 /* HC init per function */
6045 if (CHIP_IS_E1H(bp)) {
6046 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6048 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6049 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6051 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6053 /* Reset PCIE errors for debug */
6054 REG_WR(bp, 0x2114, 0xffffffff);
6055 REG_WR(bp, 0x2120, 0xffffffff);
6060 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6064 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6065 BP_FUNC(bp), load_code);
6068 mutex_init(&bp->dmae_mutex);
6069 bnx2x_gunzip_init(bp);
6071 switch (load_code) {
6072 case FW_MSG_CODE_DRV_LOAD_COMMON:
6073 rc = bnx2x_init_common(bp);
6078 case FW_MSG_CODE_DRV_LOAD_PORT:
6080 rc = bnx2x_init_port(bp);
6085 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6087 rc = bnx2x_init_func(bp);
6093 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6097 if (!BP_NOMCP(bp)) {
6098 int func = BP_FUNC(bp);
6100 bp->fw_drv_pulse_wr_seq =
6101 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6102 DRV_PULSE_SEQ_MASK);
6103 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6104 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6105 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6109 /* this needs to be done before gunzip end */
6110 bnx2x_zero_def_sb(bp);
6111 for_each_queue(bp, i)
6112 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6115 bnx2x_gunzip_end(bp);
6120 /* send the MCP a request, block until there is a reply */
6121 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6123 int func = BP_FUNC(bp);
6124 u32 seq = ++bp->fw_seq;
6127 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6129 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6130 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6133 /* let the FW do its magic ... */
6136 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6138 /* Give the FW up to 2 second (200*10ms) */
6139 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6141 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6142 cnt*delay, rc, seq);
6144 /* is this a reply to our command? */
6145 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6146 rc &= FW_MSG_CODE_MASK;
6150 BNX2X_ERR("FW failed to respond!\n");
6158 static void bnx2x_free_mem(struct bnx2x *bp)
6161 #define BNX2X_PCI_FREE(x, y, size) \
6164 pci_free_consistent(bp->pdev, size, x, y); \
6170 #define BNX2X_FREE(x) \
6182 for_each_queue(bp, i) {
6185 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6186 bnx2x_fp(bp, i, status_blk_mapping),
6187 sizeof(struct host_status_block) +
6188 sizeof(struct eth_tx_db_data));
6191 for_each_rx_queue(bp, i) {
6193 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6194 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6195 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6196 bnx2x_fp(bp, i, rx_desc_mapping),
6197 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6199 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6200 bnx2x_fp(bp, i, rx_comp_mapping),
6201 sizeof(struct eth_fast_path_rx_cqe) *
6205 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6206 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6207 bnx2x_fp(bp, i, rx_sge_mapping),
6208 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6211 for_each_tx_queue(bp, i) {
6213 /* fastpath tx rings: tx_buf tx_desc */
6214 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6215 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6216 bnx2x_fp(bp, i, tx_desc_mapping),
6217 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6219 /* end of fastpath */
6221 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6222 sizeof(struct host_def_status_block));
6224 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6225 sizeof(struct bnx2x_slowpath));
6228 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6229 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6230 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6231 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6233 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6235 #undef BNX2X_PCI_FREE
6239 static int bnx2x_alloc_mem(struct bnx2x *bp)
6242 #define BNX2X_PCI_ALLOC(x, y, size) \
6244 x = pci_alloc_consistent(bp->pdev, size, y); \
6246 goto alloc_mem_err; \
6247 memset(x, 0, size); \
6250 #define BNX2X_ALLOC(x, size) \
6252 x = vmalloc(size); \
6254 goto alloc_mem_err; \
6255 memset(x, 0, size); \
6262 for_each_queue(bp, i) {
6263 bnx2x_fp(bp, i, bp) = bp;
6266 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6267 &bnx2x_fp(bp, i, status_blk_mapping),
6268 sizeof(struct host_status_block) +
6269 sizeof(struct eth_tx_db_data));
6272 for_each_rx_queue(bp, i) {
6274 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6275 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6276 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6277 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6278 &bnx2x_fp(bp, i, rx_desc_mapping),
6279 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6281 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6282 &bnx2x_fp(bp, i, rx_comp_mapping),
6283 sizeof(struct eth_fast_path_rx_cqe) *
6287 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6288 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6289 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6290 &bnx2x_fp(bp, i, rx_sge_mapping),
6291 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6294 for_each_tx_queue(bp, i) {
6296 bnx2x_fp(bp, i, hw_tx_prods) =
6297 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6299 bnx2x_fp(bp, i, tx_prods_mapping) =
6300 bnx2x_fp(bp, i, status_blk_mapping) +
6301 sizeof(struct host_status_block);
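/* Note: the TX producers block is not a separate allocation; it lives in
 * the same DMA buffer as the status block, immediately after it.  That is
 * why the status block allocation above adds sizeof(struct eth_tx_db_data)
 * and why tx_prods_mapping is derived from status_blk_mapping by a fixed
 * offset rather than mapped on its own.
 */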
6303 /* fastpath tx rings: tx_buf tx_desc */
6304 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6305 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6306 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6307 &bnx2x_fp(bp, i, tx_desc_mapping),
6308 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6310 /* end of fastpath */
6312 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6313 sizeof(struct host_def_status_block));
6315 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6316 sizeof(struct bnx2x_slowpath));
6319 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6322 for (i = 0; i < 64*1024; i += 64) {
6323 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6324 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6327 /* allocate the searcher T2 table;
6328 we allocate 1/4 of the allocation number for T2
6329 (which is not entered into the ILT) */
6330 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6333 for (i = 0; i < 16*1024; i += 64)
6334 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6336 /* now fix up the last line in the block to point to the next block */
6337 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
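/* The searcher T2 table is a singly linked list of 64-byte entries: the
 * last 8 bytes of each entry hold the physical address of the next entry,
 * and the final entry wraps back to t2_mapping, so the hardware sees a
 * closed ring over the whole 16KB block.
 */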
6339 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6340 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6342 /* QM queues (128*MAX_CONN) */
6343 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6346 /* Slow path ring */
6347 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6355 #undef BNX2X_PCI_ALLOC
6359 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6363 for_each_tx_queue(bp, i) {
6364 struct bnx2x_fastpath *fp = &bp->fp[i];
6366 u16 bd_cons = fp->tx_bd_cons;
6367 u16 sw_prod = fp->tx_pkt_prod;
6368 u16 sw_cons = fp->tx_pkt_cons;
6370 while (sw_cons != sw_prod) {
6371 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6377 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6381 for_each_rx_queue(bp, j) {
6382 struct bnx2x_fastpath *fp = &bp->fp[j];
6384 for (i = 0; i < NUM_RX_BD; i++) {
6385 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6386 struct sk_buff *skb = rx_buf->skb;
6391 pci_unmap_single(bp->pdev,
6392 pci_unmap_addr(rx_buf, mapping),
6393 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6398 if (!fp->disable_tpa)
6399 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6400 ETH_MAX_AGGREGATION_QUEUES_E1 :
6401 ETH_MAX_AGGREGATION_QUEUES_E1H);
6405 static void bnx2x_free_skbs(struct bnx2x *bp)
6407 bnx2x_free_tx_skbs(bp);
6408 bnx2x_free_rx_skbs(bp);
6411 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6415 free_irq(bp->msix_table[0].vector, bp->dev);
6416 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6417 bp->msix_table[0].vector);
6419 for_each_queue(bp, i) {
6420 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6421 "state %x\n", i, bp->msix_table[i + offset].vector,
6422 bnx2x_fp(bp, i, state));
6424 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6428 static void bnx2x_free_irq(struct bnx2x *bp)
6430 if (bp->flags & USING_MSIX_FLAG) {
6431 bnx2x_free_msix_irqs(bp);
6432 pci_disable_msix(bp->pdev);
6433 bp->flags &= ~USING_MSIX_FLAG;
6435 } else if (bp->flags & USING_MSI_FLAG) {
6436 free_irq(bp->pdev->irq, bp->dev);
6437 pci_disable_msi(bp->pdev);
6438 bp->flags &= ~USING_MSI_FLAG;
6441 free_irq(bp->pdev->irq, bp->dev);
6444 static int bnx2x_enable_msix(struct bnx2x *bp)
6446 int i, rc, offset = 1;
6449 bp->msix_table[0].entry = igu_vec;
6450 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6452 for_each_queue(bp, i) {
6453 igu_vec = BP_L_ID(bp) + offset + i;
6454 bp->msix_table[i + offset].entry = igu_vec;
6455 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6456 "(fastpath #%u)\n", i + offset, igu_vec, i);
6459 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6460 BNX2X_NUM_QUEUES(bp) + offset);
6462 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6466 bp->flags |= USING_MSIX_FLAG;
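/* MSI-X vector layout: entry 0 carries the slowpath/default status block
 * interrupt, and entries 1..BNX2X_NUM_QUEUES(bp) map one-to-one onto the
 * fastpath queues (IGU vector BP_L_ID(bp) + offset + i).  If
 * pci_enable_msix() cannot grant the full set, the caller falls back to a
 * single queue served by MSI or legacy INTx.
 */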
6471 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6473 int i, rc, offset = 1;
6475 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6476 bp->dev->name, bp->dev);
6478 BNX2X_ERR("request sp irq failed\n");
6482 for_each_queue(bp, i) {
6483 struct bnx2x_fastpath *fp = &bp->fp[i];
6485 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6486 rc = request_irq(bp->msix_table[i + offset].vector,
6487 bnx2x_msix_fp_int, 0, fp->name, fp);
6489 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6490 bnx2x_free_msix_irqs(bp);
6494 fp->state = BNX2X_FP_STATE_IRQ;
6497 i = BNX2X_NUM_QUEUES(bp);
6499 printk(KERN_INFO PFX
6500 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6501 bp->dev->name, bp->msix_table[0].vector,
6502 bp->msix_table[offset].vector,
6503 bp->msix_table[offset + i - 1].vector);
6505 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6506 bp->dev->name, bp->msix_table[0].vector,
6507 bp->msix_table[offset + i - 1].vector);
6512 static int bnx2x_enable_msi(struct bnx2x *bp)
6516 rc = pci_enable_msi(bp->pdev);
6518 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6521 bp->flags |= USING_MSI_FLAG;
6526 static int bnx2x_req_irq(struct bnx2x *bp)
6528 unsigned long flags;
6531 if (bp->flags & USING_MSI_FLAG)
6534 flags = IRQF_SHARED;
6536 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6537 bp->dev->name, bp->dev);
6539 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6544 static void bnx2x_napi_enable(struct bnx2x *bp)
6548 for_each_rx_queue(bp, i)
6549 napi_enable(&bnx2x_fp(bp, i, napi));
6552 static void bnx2x_napi_disable(struct bnx2x *bp)
6556 for_each_rx_queue(bp, i)
6557 napi_disable(&bnx2x_fp(bp, i, napi));
6560 static void bnx2x_netif_start(struct bnx2x *bp)
6562 if (atomic_dec_and_test(&bp->intr_sem)) {
6563 if (netif_running(bp->dev)) {
6564 bnx2x_napi_enable(bp);
6565 bnx2x_int_enable(bp);
6566 if (bp->state == BNX2X_STATE_OPEN)
6567 netif_tx_wake_all_queues(bp->dev);
6572 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6574 bnx2x_int_disable_sync(bp, disable_hw);
6575 bnx2x_napi_disable(bp);
6576 if (netif_running(bp->dev)) {
6577 netif_tx_disable(bp->dev);
6578 bp->dev->trans_start = jiffies; /* prevent tx timeout */
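/* bnx2x_netif_start() only re-enables NAPI and interrupts once intr_sem
 * drops back to zero, i.e. after the last bnx2x_netif_stop() caller is
 * done.  The trans_start touch above simply rearms the netdev watchdog so
 * the stack does not report a spurious TX timeout while the queues are
 * deliberately stopped.
 */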
6583 * Init service functions
6586 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6588 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6589 int port = BP_PORT(bp);
6592 * unicasts 0-31:port0 32-63:port1
6593 * multicast 64-127:port0 128-191:port1
6595 config->hdr.length = 2;
6596 config->hdr.offset = port ? 32 : 0;
6597 config->hdr.client_id = bp->fp->cl_id;
6598 config->hdr.reserved1 = 0;
6601 config->config_table[0].cam_entry.msb_mac_addr =
6602 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6603 config->config_table[0].cam_entry.middle_mac_addr =
6604 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6605 config->config_table[0].cam_entry.lsb_mac_addr =
6606 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6607 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6609 config->config_table[0].target_table_entry.flags = 0;
6611 CAM_INVALIDATE(config->config_table[0]);
6612 config->config_table[0].target_table_entry.client_id = 0;
6613 config->config_table[0].target_table_entry.vlan_id = 0;
6615 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6616 (set ? "setting" : "clearing"),
6617 config->config_table[0].cam_entry.msb_mac_addr,
6618 config->config_table[0].cam_entry.middle_mac_addr,
6619 config->config_table[0].cam_entry.lsb_mac_addr);
6622 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6623 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6624 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6625 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6627 config->config_table[1].target_table_entry.flags =
6628 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6630 CAM_INVALIDATE(config->config_table[1]);
6631 config->config_table[1].target_table_entry.client_id = 0;
6632 config->config_table[1].target_table_entry.vlan_id = 0;
6634 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6635 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6636 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
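/* E1 CAM programming notes: config_table[0] is the unicast MAC and
 * config_table[1] the broadcast filter, written at CAM offset 0 (port 0)
 * or 32 (port 1).  Each 16-bit CAM field is byte-swapped relative to host
 * order, hence the swab16() on the raw dev_addr words.  Illustrative
 * example only: a MAC of 00:10:18:aa:bb:cc on a little-endian host yields
 * the fields 0x0010, 0x18aa and 0xbbcc.
 */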
6639 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6641 struct mac_configuration_cmd_e1h *config =
6642 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6644 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6645 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6649 /* CAM allocation for E1H
6650 * unicasts: by func number
6651 * multicast: 20+FUNC*20, 20 each
6653 config->hdr.length = 1;
6654 config->hdr.offset = BP_FUNC(bp);
6655 config->hdr.client_id = bp->fp->cl_id;
6656 config->hdr.reserved1 = 0;
6659 config->config_table[0].msb_mac_addr =
6660 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6661 config->config_table[0].middle_mac_addr =
6662 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6663 config->config_table[0].lsb_mac_addr =
6664 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6665 config->config_table[0].client_id = BP_L_ID(bp);
6666 config->config_table[0].vlan_id = 0;
6667 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6669 config->config_table[0].flags = BP_PORT(bp);
6671 config->config_table[0].flags =
6672 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6674 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6675 (set ? "setting" : "clearing"),
6676 config->config_table[0].msb_mac_addr,
6677 config->config_table[0].middle_mac_addr,
6678 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6680 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6681 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6682 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
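/* Ramrod completions arrive as fastpath CQEs and are turned into a state
 * change by bnx2x_sp_event(); the helper below just spins (optionally
 * driving bnx2x_rx_int() itself when in polling mode) until *state_p
 * reaches the requested state or the retry budget (cnt, starting at 5000)
 * runs out.
 */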
6685 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6686 int *state_p, int poll)
6688 /* can take a while if any port is running */
6691 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6692 poll ? "polling" : "waiting", state, idx);
6697 bnx2x_rx_int(bp->fp, 10);
6698 /* if the index is different from 0,
6699 * the reply for some commands will
6700 * be on the non-default queue
6703 bnx2x_rx_int(&bp->fp[idx], 10);
6706 mb(); /* state is changed by bnx2x_sp_event() */
6707 if (*state_p == state) {
6708 #ifdef BNX2X_STOP_ON_ERROR
6709 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6718 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6719 poll ? "polling" : "waiting", state, idx);
6720 #ifdef BNX2X_STOP_ON_ERROR
6727 static int bnx2x_setup_leading(struct bnx2x *bp)
6731 /* reset IGU state */
6732 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6735 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6737 /* Wait for completion */
6738 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6743 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6745 struct bnx2x_fastpath *fp = &bp->fp[index];
6747 /* reset IGU state */
6748 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6751 fp->state = BNX2X_FP_STATE_OPENING;
6752 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6755 /* Wait for completion */
6756 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6760 static int bnx2x_poll(struct napi_struct *napi, int budget);
6762 static void bnx2x_set_int_mode(struct bnx2x *bp)
6770 bp->num_rx_queues = num_queues;
6771 bp->num_tx_queues = num_queues;
6773 "set number of queues to %d\n", num_queues);
6778 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6779 num_queues = min_t(u32, num_online_cpus(),
6780 BNX2X_MAX_QUEUES(bp));
6783 bp->num_rx_queues = num_queues;
6784 bp->num_tx_queues = num_queues;
6785 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6786 " number of tx queues to %d\n",
6787 bp->num_rx_queues, bp->num_tx_queues);
6788 /* if we can't use MSI-X we only need one fp,
6789 * so try to enable MSI-X with the requested number of fp's
6790 * and fall back to MSI or legacy INTx with one fp
6792 if (bnx2x_enable_msix(bp)) {
6793 /* failed to enable MSI-X */
6795 bp->num_rx_queues = num_queues;
6796 bp->num_tx_queues = num_queues;
6798 BNX2X_ERR("Multi requested but failed to "
6799 "enable MSI-X set number of "
6800 "queues to %d\n", num_queues);
6804 bp->dev->real_num_tx_queues = bp->num_tx_queues;
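/* Queue count policy: with multi_mode == ETH_RSS_MODE_REGULAR the driver
 * asks for min(num_online_cpus(), BNX2X_MAX_QUEUES(bp)) RX/TX queue pairs
 * and tries to obtain that many MSI-X vectors (plus one for the
 * slowpath); if MSI-X cannot be enabled it drops back to a single queue
 * pair served by MSI or INTx.
 */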
6807 static void bnx2x_set_rx_mode(struct net_device *dev);
6809 /* must be called with rtnl_lock */
6810 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6814 #ifdef BNX2X_STOP_ON_ERROR
6815 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6816 if (unlikely(bp->panic))
6820 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6822 bnx2x_set_int_mode(bp);
6824 if (bnx2x_alloc_mem(bp))
6827 for_each_rx_queue(bp, i)
6828 bnx2x_fp(bp, i, disable_tpa) =
6829 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6831 for_each_rx_queue(bp, i)
6832 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6835 #ifdef BNX2X_STOP_ON_ERROR
6836 for_each_rx_queue(bp, i) {
6837 struct bnx2x_fastpath *fp = &bp->fp[i];
6839 fp->poll_no_work = 0;
6841 fp->poll_max_calls = 0;
6842 fp->poll_complete = 0;
6846 bnx2x_napi_enable(bp);
6848 if (bp->flags & USING_MSIX_FLAG) {
6849 rc = bnx2x_req_msix_irqs(bp);
6851 pci_disable_msix(bp->pdev);
6855 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6856 bnx2x_enable_msi(bp);
6858 rc = bnx2x_req_irq(bp);
6860 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6861 if (bp->flags & USING_MSI_FLAG)
6862 pci_disable_msi(bp->pdev);
6865 if (bp->flags & USING_MSI_FLAG) {
6866 bp->dev->irq = bp->pdev->irq;
6867 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6868 bp->dev->name, bp->pdev->irq);
6872 /* Send LOAD_REQUEST command to MCP.
6873 Returns the type of LOAD command:
6874 if it is the first port to be initialized,
6875 common blocks should be initialized, otherwise - not
6877 if (!BP_NOMCP(bp)) {
6878 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6880 BNX2X_ERR("MCP response failure, aborting\n");
6884 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6885 rc = -EBUSY; /* other port in diagnostic mode */
6890 int port = BP_PORT(bp);
6892 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
6893 load_count[0], load_count[1], load_count[2]);
6895 load_count[1 + port]++;
6896 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
6897 load_count[0], load_count[1], load_count[2]);
6898 if (load_count[0] == 1)
6899 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6900 else if (load_count[1 + port] == 1)
6901 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6903 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
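/* load_count[] bookkeeping (used only when the MCP is absent): index 0
 * counts every loaded function on the device, indices 1 and 2 count the
 * functions on port 0 and port 1 respectively.  The first function
 * overall performs COMMON init, the first one on its port performs PORT
 * init, and every other function does FUNCTION init only - mirroring what
 * the MCP would normally arbitrate.
 */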
6906 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6907 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6911 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6914 rc = bnx2x_init_hw(bp, load_code);
6916 BNX2X_ERR("HW init failed, aborting\n");
6920 /* Setup NIC internals and enable interrupts */
6921 bnx2x_nic_init(bp, load_code);
6923 /* Send LOAD_DONE command to MCP */
6924 if (!BP_NOMCP(bp)) {
6925 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6927 BNX2X_ERR("MCP response failure, aborting\n");
6933 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6935 rc = bnx2x_setup_leading(bp);
6937 BNX2X_ERR("Setup leading failed!\n");
6941 if (CHIP_IS_E1H(bp))
6942 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6943 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6944 bp->state = BNX2X_STATE_DISABLED;
6947 if (bp->state == BNX2X_STATE_OPEN)
6948 for_each_nondefault_queue(bp, i) {
6949 rc = bnx2x_setup_multi(bp, i);
6955 bnx2x_set_mac_addr_e1(bp, 1);
6957 bnx2x_set_mac_addr_e1h(bp, 1);
6960 bnx2x_initial_phy_init(bp, load_mode);
6962 /* Start fast path */
6963 switch (load_mode) {
6965 /* Tx queue should be only reenabled */
6966 netif_tx_wake_all_queues(bp->dev);
6967 /* Initialize the receive filter. */
6968 bnx2x_set_rx_mode(bp->dev);
6972 netif_tx_start_all_queues(bp->dev);
6973 /* Initialize the receive filter. */
6974 bnx2x_set_rx_mode(bp->dev);
6978 /* Initialize the receive filter. */
6979 bnx2x_set_rx_mode(bp->dev);
6980 bp->state = BNX2X_STATE_DIAG;
6988 bnx2x__link_status_update(bp);
6990 /* start the timer */
6991 mod_timer(&bp->timer, jiffies + bp->current_interval);
6997 bnx2x_int_disable_sync(bp, 1);
6998 if (!BP_NOMCP(bp)) {
6999 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7000 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7003 /* Free SKBs, SGEs, TPA pool and driver internals */
7004 bnx2x_free_skbs(bp);
7005 for_each_rx_queue(bp, i)
7006 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7011 bnx2x_napi_disable(bp);
7012 for_each_rx_queue(bp, i)
7013 netif_napi_del(&bnx2x_fp(bp, i, napi));
7019 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7021 struct bnx2x_fastpath *fp = &bp->fp[index];
7024 /* halt the connection */
7025 fp->state = BNX2X_FP_STATE_HALTING;
7026 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7028 /* Wait for completion */
7029 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7031 if (rc) /* timeout */
7034 /* delete cfc entry */
7035 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7037 /* Wait for completion */
7038 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7043 static int bnx2x_stop_leading(struct bnx2x *bp)
7045 __le16 dsb_sp_prod_idx;
7046 /* if the other port is handling traffic,
7047 this can take a lot of time */
7053 /* Send HALT ramrod */
7054 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7055 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7057 /* Wait for completion */
7058 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7059 &(bp->fp[0].state), 1);
7060 if (rc) /* timeout */
7063 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7065 /* Send PORT_DELETE ramrod */
7066 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7068 /* Wait for the completion to arrive on the default status block;
7069 we are going to reset the chip anyway,
7070 so there is not much to do if this times out
7072 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7074 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7075 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7076 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7077 #ifdef BNX2X_STOP_ON_ERROR
7085 rmb(); /* Refresh the dsb_sp_prod */
7087 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7088 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
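/* Unlike the per-queue HALT/CFC_DEL ramrods above, the PORT_DEL ramrod
 * completes on the default (slowpath) status block, so instead of waiting
 * on a fastpath state we simply watch dsb_sp_prod advance; a timeout here
 * is tolerated because the chip is about to be reset anyway.
 */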
7093 static void bnx2x_reset_func(struct bnx2x *bp)
7095 int port = BP_PORT(bp);
7096 int func = BP_FUNC(bp);
7100 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7101 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7104 base = FUNC_ILT_BASE(func);
7105 for (i = base; i < base + ILT_PER_FUNC; i++)
7106 bnx2x_ilt_wr(bp, i, 0);
7109 static void bnx2x_reset_port(struct bnx2x *bp)
7111 int port = BP_PORT(bp);
7114 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7116 /* Do not rcv packets to BRB */
7117 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7118 /* Do not direct rcv packets that are not for MCP to the BRB */
7119 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7120 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7123 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7126 /* Check for BRB port occupancy */
7127 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7129 DP(NETIF_MSG_IFDOWN,
7130 "BRB1 is not empty %d blocks are occupied\n", val);
7132 /* TODO: Close Doorbell port? */
7135 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7137 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7138 BP_FUNC(bp), reset_code);
7140 switch (reset_code) {
7141 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7142 bnx2x_reset_port(bp);
7143 bnx2x_reset_func(bp);
7144 bnx2x_reset_common(bp);
7147 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7148 bnx2x_reset_port(bp);
7149 bnx2x_reset_func(bp);
7152 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7153 bnx2x_reset_func(bp);
7157 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7162 /* must be called with rtnl_lock */
7163 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7165 int port = BP_PORT(bp);
7169 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7171 bp->rx_mode = BNX2X_RX_MODE_NONE;
7172 bnx2x_set_storm_rx_mode(bp);
7174 bnx2x_netif_stop(bp, 1);
7176 del_timer_sync(&bp->timer);
7177 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7178 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7179 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7184 /* Wait until tx fastpath tasks complete */
7185 for_each_tx_queue(bp, i) {
7186 struct bnx2x_fastpath *fp = &bp->fp[i];
7189 while (bnx2x_has_tx_work_unload(fp)) {
7193 BNX2X_ERR("timeout waiting for queue[%d]\n",
7195 #ifdef BNX2X_STOP_ON_ERROR
7206 /* Give HW time to discard old tx messages */
7209 if (CHIP_IS_E1(bp)) {
7210 struct mac_configuration_cmd *config =
7211 bnx2x_sp(bp, mcast_config);
7213 bnx2x_set_mac_addr_e1(bp, 0);
7215 for (i = 0; i < config->hdr.length; i++)
7216 CAM_INVALIDATE(config->config_table[i]);
7218 config->hdr.length = i;
7219 if (CHIP_REV_IS_SLOW(bp))
7220 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7222 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7223 config->hdr.client_id = bp->fp->cl_id;
7224 config->hdr.reserved1 = 0;
7226 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7227 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7228 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7231 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7233 bnx2x_set_mac_addr_e1h(bp, 0);
7235 for (i = 0; i < MC_HASH_SIZE; i++)
7236 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7239 if (unload_mode == UNLOAD_NORMAL)
7240 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7242 else if (bp->flags & NO_WOL_FLAG) {
7243 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7244 if (CHIP_IS_E1H(bp))
7245 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7247 } else if (bp->wol) {
7248 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7249 u8 *mac_addr = bp->dev->dev_addr;
7251 /* The mac address is written to entries 1-4 to
7252 preserve entry 0 which is used by the PMF */
7253 u8 entry = (BP_E1HVN(bp) + 1)*8;
7255 val = (mac_addr[0] << 8) | mac_addr[1];
7256 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7258 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7259 (mac_addr[4] << 8) | mac_addr[5];
7260 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
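/* The magic-packet match registers take the MAC as two 32-bit writes:
 * the first holds bytes 0-1 in its low 16 bits, the second bytes 2-5.
 * Illustrative example only: for 00:10:18:aa:bb:cc the values written are
 * 0x00000010 and 0x18aabbcc.
 */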
7262 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7265 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7267 /* Close multi and leading connections
7268 Completions for ramrods are collected in a synchronous way */
7269 for_each_nondefault_queue(bp, i)
7270 if (bnx2x_stop_multi(bp, i))
7273 rc = bnx2x_stop_leading(bp);
7275 BNX2X_ERR("Stop leading failed!\n");
7276 #ifdef BNX2X_STOP_ON_ERROR
7285 reset_code = bnx2x_fw_command(bp, reset_code);
7287 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7288 load_count[0], load_count[1], load_count[2]);
7290 load_count[1 + port]--;
7291 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7292 load_count[0], load_count[1], load_count[2]);
7293 if (load_count[0] == 0)
7294 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7295 else if (load_count[1 + port] == 0)
7296 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7298 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7301 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7302 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7303 bnx2x__link_reset(bp);
7305 /* Reset the chip */
7306 bnx2x_reset_chip(bp, reset_code);
7308 /* Report UNLOAD_DONE to MCP */
7310 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7314 /* Free SKBs, SGEs, TPA pool and driver internals */
7315 bnx2x_free_skbs(bp);
7316 for_each_rx_queue(bp, i)
7317 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7318 for_each_rx_queue(bp, i)
7319 netif_napi_del(&bnx2x_fp(bp, i, napi));
7322 bp->state = BNX2X_STATE_CLOSED;
7324 netif_carrier_off(bp->dev);
7329 static void bnx2x_reset_task(struct work_struct *work)
7331 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7333 #ifdef BNX2X_STOP_ON_ERROR
7334 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7335 " so reset not done to allow debug dump,\n"
7336 KERN_ERR " you will need to reboot when done\n");
7342 if (!netif_running(bp->dev))
7343 goto reset_task_exit;
7345 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7346 bnx2x_nic_load(bp, LOAD_NORMAL);
7352 /* end of nic load/unload */
7357 * Init service functions
7360 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7363 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7364 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7365 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7366 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7367 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7368 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7369 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7370 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7372 BNX2X_ERR("Unsupported function index: %d\n", func);
7377 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7379 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7381 /* Flush all outstanding writes */
7384 /* Pretend to be function 0 */
7386 /* Flush the GRC transaction (in the chip) */
7387 new_val = REG_RD(bp, reg);
7389 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7394 /* From now we are in the "like-E1" mode */
7395 bnx2x_int_disable(bp);
7397 /* Flush all outstanding writes */
7400 /* Restore the original function settings */
7401 REG_WR(bp, reg, orig_func);
7402 new_val = REG_RD(bp, reg);
7403 if (new_val != orig_func) {
7404 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7405 orig_func, new_val);
7410 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7412 if (CHIP_IS_E1H(bp))
7413 bnx2x_undi_int_disable_e1h(bp, func);
7415 bnx2x_int_disable(bp);
7418 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7422 /* Check if there is any driver already loaded */
7423 val = REG_RD(bp, MISC_REG_UNPREPARED);
7425 /* Check if it is the UNDI driver
7426 * UNDI driver initializes CID offset for normal bell to 0x7
7428 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7429 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7431 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7433 int func = BP_FUNC(bp);
7437 /* clear the UNDI indication */
7438 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7440 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7442 /* try unload UNDI on port 0 */
7445 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7446 DRV_MSG_SEQ_NUMBER_MASK);
7447 reset_code = bnx2x_fw_command(bp, reset_code);
7449 /* if UNDI is loaded on the other port */
7450 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7452 /* send "DONE" for previous unload */
7453 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7455 /* unload UNDI on port 1 */
7458 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7459 DRV_MSG_SEQ_NUMBER_MASK);
7460 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7462 bnx2x_fw_command(bp, reset_code);
7465 /* now it's safe to release the lock */
7466 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7468 bnx2x_undi_int_disable(bp, func);
7470 /* close input traffic and wait for it */
7471 /* Do not rcv packets to BRB */
7473 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7474 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7475 /* Do not direct rcv packets that are not for MCP to
7478 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7479 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7482 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7483 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7486 /* save NIG port swap info */
7487 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7488 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7491 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7494 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7496 /* take the NIG out of reset and restore swap values */
7498 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7499 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7500 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7501 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7503 /* send unload done to the MCP */
7504 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7506 /* restore our func and fw_seq */
7509 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7510 DRV_MSG_SEQ_NUMBER_MASK);
7513 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
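/* Summary of the UNDI unload path above: the presence of a pre-boot
 * (UNDI) driver is inferred from DORQ_REG_NORM_CID_OFST being set to 7;
 * the driver then issues UNLOAD requests to the MCP for both ports, masks
 * off input traffic to the BRB, puts the BRB and NIG through a reset
 * cycle while preserving the NIG port-swap straps, reports UNLOAD_DONE
 * and finally restores its own function and fw_seq context.
 */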
7517 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7519 u32 val, val2, val3, val4, id;
7522 /* Get the chip revision id and number. */
7523 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7524 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7525 id = ((val & 0xffff) << 16);
7526 val = REG_RD(bp, MISC_REG_CHIP_REV);
7527 id |= ((val & 0xf) << 12);
7528 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7529 id |= ((val & 0xff) << 4);
7530 val = REG_RD(bp, MISC_REG_BOND_ID);
7532 bp->common.chip_id = id;
7533 bp->link_params.chip_id = bp->common.chip_id;
7534 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
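/* The 32-bit chip_id is assembled as num[31:16] | rev[15:12] |
 * metal[11:4] | bond_id[3:0].  Purely as an illustration, a hypothetical
 * chip number of 0x164e with rev, metal and bond all zero would give a
 * chip_id of 0x164e0000.
 */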
7536 val = (REG_RD(bp, 0x2874) & 0x55);
7537 if ((bp->common.chip_id & 0x1) ||
7538 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7539 bp->flags |= ONE_PORT_FLAG;
7540 BNX2X_DEV_INFO("single port device\n");
7543 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7544 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7545 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7546 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7547 bp->common.flash_size, bp->common.flash_size);
7549 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7550 bp->link_params.shmem_base = bp->common.shmem_base;
7551 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7553 if (!bp->common.shmem_base ||
7554 (bp->common.shmem_base < 0xA0000) ||
7555 (bp->common.shmem_base >= 0xC0000)) {
7556 BNX2X_DEV_INFO("MCP not active\n");
7557 bp->flags |= NO_MCP_FLAG;
7561 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7562 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7563 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7564 BNX2X_ERR("BAD MCP validity signature\n");
7566 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7567 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7569 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7570 SHARED_HW_CFG_LED_MODE_MASK) >>
7571 SHARED_HW_CFG_LED_MODE_SHIFT);
7573 bp->link_params.feature_config_flags = 0;
7574 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7575 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7576 bp->link_params.feature_config_flags |=
7577 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7579 bp->link_params.feature_config_flags &=
7580 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7582 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7583 bp->common.bc_ver = val;
7584 BNX2X_DEV_INFO("bc_ver %X\n", val);
7585 if (val < BNX2X_BC_VER) {
7586 /* for now only warn
7587 * later we might need to enforce this */
7588 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7589 " please upgrade BC\n", BNX2X_BC_VER, val);
7592 if (BP_E1HVN(bp) == 0) {
7593 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7594 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7596 /* no WOL capability for E1HVN != 0 */
7597 bp->flags |= NO_WOL_FLAG;
7599 BNX2X_DEV_INFO("%sWoL capable\n",
7600 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7602 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7603 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7604 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7605 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7607 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7608 val, val2, val3, val4);
7611 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7614 int port = BP_PORT(bp);
7617 switch (switch_cfg) {
7619 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7622 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7623 switch (ext_phy_type) {
7624 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7625 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7628 bp->port.supported |= (SUPPORTED_10baseT_Half |
7629 SUPPORTED_10baseT_Full |
7630 SUPPORTED_100baseT_Half |
7631 SUPPORTED_100baseT_Full |
7632 SUPPORTED_1000baseT_Full |
7633 SUPPORTED_2500baseX_Full |
7638 SUPPORTED_Asym_Pause);
7641 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7642 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7645 bp->port.supported |= (SUPPORTED_10baseT_Half |
7646 SUPPORTED_10baseT_Full |
7647 SUPPORTED_100baseT_Half |
7648 SUPPORTED_100baseT_Full |
7649 SUPPORTED_1000baseT_Full |
7654 SUPPORTED_Asym_Pause);
7658 BNX2X_ERR("NVRAM config error. "
7659 "BAD SerDes ext_phy_config 0x%x\n",
7660 bp->link_params.ext_phy_config);
7664 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7666 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7669 case SWITCH_CFG_10G:
7670 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7673 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7674 switch (ext_phy_type) {
7675 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7676 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7679 bp->port.supported |= (SUPPORTED_10baseT_Half |
7680 SUPPORTED_10baseT_Full |
7681 SUPPORTED_100baseT_Half |
7682 SUPPORTED_100baseT_Full |
7683 SUPPORTED_1000baseT_Full |
7684 SUPPORTED_2500baseX_Full |
7685 SUPPORTED_10000baseT_Full |
7690 SUPPORTED_Asym_Pause);
7693 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7694 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7697 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7698 SUPPORTED_1000baseT_Full |
7702 SUPPORTED_Asym_Pause);
7705 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7706 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7709 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7710 SUPPORTED_2500baseX_Full |
7711 SUPPORTED_1000baseT_Full |
7715 SUPPORTED_Asym_Pause);
7718 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7719 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7722 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7725 SUPPORTED_Asym_Pause);
7728 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7729 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7732 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7733 SUPPORTED_1000baseT_Full |
7736 SUPPORTED_Asym_Pause);
7739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7740 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7743 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7744 SUPPORTED_1000baseT_Full |
7748 SUPPORTED_Asym_Pause);
7751 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7752 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7755 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7759 SUPPORTED_Asym_Pause);
7762 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7763 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7766 bp->port.supported |= (SUPPORTED_10baseT_Half |
7767 SUPPORTED_10baseT_Full |
7768 SUPPORTED_100baseT_Half |
7769 SUPPORTED_100baseT_Full |
7770 SUPPORTED_1000baseT_Full |
7771 SUPPORTED_10000baseT_Full |
7775 SUPPORTED_Asym_Pause);
7778 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7779 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7780 bp->link_params.ext_phy_config);
7784 BNX2X_ERR("NVRAM config error. "
7785 "BAD XGXS ext_phy_config 0x%x\n",
7786 bp->link_params.ext_phy_config);
7790 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7792 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7797 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7798 bp->port.link_config);
7801 bp->link_params.phy_addr = bp->port.phy_addr;
7803 /* mask what we support according to speed_cap_mask */
7804 if (!(bp->link_params.speed_cap_mask &
7805 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7806 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7808 if (!(bp->link_params.speed_cap_mask &
7809 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7810 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7812 if (!(bp->link_params.speed_cap_mask &
7813 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7814 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7816 if (!(bp->link_params.speed_cap_mask &
7817 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7818 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7820 if (!(bp->link_params.speed_cap_mask &
7821 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7822 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7823 SUPPORTED_1000baseT_Full);
7825 if (!(bp->link_params.speed_cap_mask &
7826 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7827 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7829 if (!(bp->link_params.speed_cap_mask &
7830 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7831 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7833 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7836 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7838 bp->link_params.req_duplex = DUPLEX_FULL;
7840 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7841 case PORT_FEATURE_LINK_SPEED_AUTO:
7842 if (bp->port.supported & SUPPORTED_Autoneg) {
7843 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7844 bp->port.advertising = bp->port.supported;
7847 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7849 if ((ext_phy_type ==
7850 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7852 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7853 /* force 10G, no AN */
7854 bp->link_params.req_line_speed = SPEED_10000;
7855 bp->port.advertising =
7856 (ADVERTISED_10000baseT_Full |
7860 BNX2X_ERR("NVRAM config error. "
7861 "Invalid link_config 0x%x"
7862 " Autoneg not supported\n",
7863 bp->port.link_config);
7868 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7869 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7870 bp->link_params.req_line_speed = SPEED_10;
7871 bp->port.advertising = (ADVERTISED_10baseT_Full |
7874 BNX2X_ERR("NVRAM config error. "
7875 "Invalid link_config 0x%x"
7876 " speed_cap_mask 0x%x\n",
7877 bp->port.link_config,
7878 bp->link_params.speed_cap_mask);
7883 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7884 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7885 bp->link_params.req_line_speed = SPEED_10;
7886 bp->link_params.req_duplex = DUPLEX_HALF;
7887 bp->port.advertising = (ADVERTISED_10baseT_Half |
7890 BNX2X_ERR("NVRAM config error. "
7891 "Invalid link_config 0x%x"
7892 " speed_cap_mask 0x%x\n",
7893 bp->port.link_config,
7894 bp->link_params.speed_cap_mask);
7899 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7900 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7901 bp->link_params.req_line_speed = SPEED_100;
7902 bp->port.advertising = (ADVERTISED_100baseT_Full |
7905 BNX2X_ERR("NVRAM config error. "
7906 "Invalid link_config 0x%x"
7907 " speed_cap_mask 0x%x\n",
7908 bp->port.link_config,
7909 bp->link_params.speed_cap_mask);
7914 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7915 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7916 bp->link_params.req_line_speed = SPEED_100;
7917 bp->link_params.req_duplex = DUPLEX_HALF;
7918 bp->port.advertising = (ADVERTISED_100baseT_Half |
7921 BNX2X_ERR("NVRAM config error. "
7922 "Invalid link_config 0x%x"
7923 " speed_cap_mask 0x%x\n",
7924 bp->port.link_config,
7925 bp->link_params.speed_cap_mask);
7930 case PORT_FEATURE_LINK_SPEED_1G:
7931 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7932 bp->link_params.req_line_speed = SPEED_1000;
7933 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7936 BNX2X_ERR("NVRAM config error. "
7937 "Invalid link_config 0x%x"
7938 " speed_cap_mask 0x%x\n",
7939 bp->port.link_config,
7940 bp->link_params.speed_cap_mask);
7945 case PORT_FEATURE_LINK_SPEED_2_5G:
7946 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7947 bp->link_params.req_line_speed = SPEED_2500;
7948 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7951 BNX2X_ERR("NVRAM config error. "
7952 "Invalid link_config 0x%x"
7953 " speed_cap_mask 0x%x\n",
7954 bp->port.link_config,
7955 bp->link_params.speed_cap_mask);
7960 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7961 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7962 case PORT_FEATURE_LINK_SPEED_10G_KR:
7963 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7964 bp->link_params.req_line_speed = SPEED_10000;
7965 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7968 BNX2X_ERR("NVRAM config error. "
7969 "Invalid link_config 0x%x"
7970 " speed_cap_mask 0x%x\n",
7971 bp->port.link_config,
7972 bp->link_params.speed_cap_mask);
7978 BNX2X_ERR("NVRAM config error. "
7979 "BAD link speed link_config 0x%x\n",
7980 bp->port.link_config);
7981 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7982 bp->port.advertising = bp->port.supported;
7986 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7987 PORT_FEATURE_FLOW_CONTROL_MASK);
7988 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7989 !(bp->port.supported & SUPPORTED_Autoneg))
7990 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7992 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7993 " advertising 0x%x\n",
7994 bp->link_params.req_line_speed,
7995 bp->link_params.req_duplex,
7996 bp->link_params.req_flow_ctrl, bp->port.advertising);
7999 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8001 int port = BP_PORT(bp);
8006 bp->link_params.bp = bp;
8007 bp->link_params.port = port;
8009 bp->link_params.lane_config =
8010 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8011 bp->link_params.ext_phy_config =
8013 dev_info.port_hw_config[port].external_phy_config);
8014 bp->link_params.speed_cap_mask =
8016 dev_info.port_hw_config[port].speed_capability_mask);
8018 bp->port.link_config =
8019 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8021 /* Get the 4 lanes xgxs config rx and tx */
8022 for (i = 0; i < 2; i++) {
8024 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8025 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8026 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8029 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8030 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8031 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8034 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8035 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8036 bp->link_params.feature_config_flags |=
8037 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8039 bp->link_params.feature_config_flags &=
8040 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8042 /* If the device is capable of WoL, set the default state according
8045 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8046 (config & PORT_FEATURE_WOL_ENABLED));
8048 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8049 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8050 bp->link_params.lane_config,
8051 bp->link_params.ext_phy_config,
8052 bp->link_params.speed_cap_mask, bp->port.link_config);
8054 bp->link_params.switch_cfg = (bp->port.link_config &
8055 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8056 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8058 bnx2x_link_settings_requested(bp);
8060 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8061 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8062 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8063 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8064 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8065 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8066 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8067 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8068 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8069 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
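/* The permanent MAC is kept in shmem as two words: mac_upper holds bytes
 * 0-1 in its low 16 bits and mac_lower holds bytes 2-5.  Illustrative
 * example only: mac_upper = 0x00000010 and mac_lower = 0x18aabbcc decode
 * to 00:10:18:aa:bb:cc.
 */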
8072 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8074 int func = BP_FUNC(bp);
8078 bnx2x_get_common_hwinfo(bp);
8082 if (CHIP_IS_E1H(bp)) {
8084 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8086 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8087 FUNC_MF_CFG_E1HOV_TAG_MASK);
8088 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8092 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8094 func, bp->e1hov, bp->e1hov);
8096 BNX2X_DEV_INFO("single function mode\n");
8098 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8099 " aborting\n", func);
8105 if (!BP_NOMCP(bp)) {
8106 bnx2x_get_port_hwinfo(bp);
8108 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8109 DRV_MSG_SEQ_NUMBER_MASK);
8110 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8114 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8115 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8116 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8117 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8118 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8119 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8120 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8121 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8122 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8123 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8124 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8126 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8134 /* only supposed to happen on emulation/FPGA */
8135 BNX2X_ERR("warning random MAC workaround active\n");
8136 random_ether_addr(bp->dev->dev_addr);
8137 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8143 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8145 int func = BP_FUNC(bp);
8149 /* Disable interrupt handling until HW is initialized */
8150 atomic_set(&bp->intr_sem, 1);
8152 mutex_init(&bp->port.phy_mutex);
8154 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8155 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8157 rc = bnx2x_get_hwinfo(bp);
8159 /* need to reset chip if undi was active */
8161 bnx2x_undi_unload(bp);
8163 if (CHIP_REV_IS_FPGA(bp))
8164 printk(KERN_ERR PFX "FPGA detected\n");
8166 if (BP_NOMCP(bp) && (func == 0))
8168 "MCP disabled, must load devices in order!\n");
8170 /* Set multi queue mode */
8171 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8172 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8174 "Multi disabled since int_mode requested is not MSI-X\n");
8175 multi_mode = ETH_RSS_MODE_DISABLED;
8177 bp->multi_mode = multi_mode;
8182 bp->flags &= ~TPA_ENABLE_FLAG;
8183 bp->dev->features &= ~NETIF_F_LRO;
8185 bp->flags |= TPA_ENABLE_FLAG;
8186 bp->dev->features |= NETIF_F_LRO;
8191 bp->tx_ring_size = MAX_TX_AVAIL;
8192 bp->rx_ring_size = MAX_RX_AVAIL;
8199 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8200 bp->current_interval = (poll ? poll : timer_interval);
8202 init_timer(&bp->timer);
8203 bp->timer.expires = jiffies + bp->current_interval;
8204 bp->timer.data = (unsigned long) bp;
8205 bp->timer.function = bnx2x_timer;
8211 * ethtool service functions
8214 /* All ethtool functions called with rtnl_lock */
8216 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8218 struct bnx2x *bp = netdev_priv(dev);
8220 cmd->supported = bp->port.supported;
8221 cmd->advertising = bp->port.advertising;
8223 if (netif_carrier_ok(dev)) {
8224 cmd->speed = bp->link_vars.line_speed;
8225 cmd->duplex = bp->link_vars.duplex;
8227 cmd->speed = bp->link_params.req_line_speed;
8228 cmd->duplex = bp->link_params.req_duplex;
8233 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8234 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8235 if (vn_max_rate < cmd->speed)
8236 cmd->speed = vn_max_rate;
8239 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8241 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8243 switch (ext_phy_type) {
8244 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8245 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8246 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8247 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8248 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8249 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8250 cmd->port = PORT_FIBRE;
8253 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8254 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8255 cmd->port = PORT_TP;
8258 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8259 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8260 bp->link_params.ext_phy_config);
8264 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8265 bp->link_params.ext_phy_config);
8269 cmd->port = PORT_TP;
8271 cmd->phy_address = bp->port.phy_addr;
8272 cmd->transceiver = XCVR_INTERNAL;
8274 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8275 cmd->autoneg = AUTONEG_ENABLE;
8277 cmd->autoneg = AUTONEG_DISABLE;
8282 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8283 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8284 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8285 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8286 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8287 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8288 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8293 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8295 struct bnx2x *bp = netdev_priv(dev);
8301 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8302 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8303 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8304 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8305 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8306 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8307 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8309 if (cmd->autoneg == AUTONEG_ENABLE) {
8310 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8311 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8315 /* advertise the requested speed and duplex if supported */
8316 cmd->advertising &= bp->port.supported;
8318 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8319 bp->link_params.req_duplex = DUPLEX_FULL;
8320 bp->port.advertising |= (ADVERTISED_Autoneg |
8323 } else { /* forced speed */
8324 /* advertise the requested speed and duplex if supported */
8325 switch (cmd->speed) {
8327 if (cmd->duplex == DUPLEX_FULL) {
8328 if (!(bp->port.supported &
8329 SUPPORTED_10baseT_Full)) {
8331 "10M full not supported\n");
8335 advertising = (ADVERTISED_10baseT_Full |
8338 if (!(bp->port.supported &
8339 SUPPORTED_10baseT_Half)) {
8341 "10M half not supported\n");
8345 advertising = (ADVERTISED_10baseT_Half |
8351 if (cmd->duplex == DUPLEX_FULL) {
8352 if (!(bp->port.supported &
8353 SUPPORTED_100baseT_Full)) {
8355 "100M full not supported\n");
8359 advertising = (ADVERTISED_100baseT_Full |
8362 if (!(bp->port.supported &
8363 SUPPORTED_100baseT_Half)) {
8365 "100M half not supported\n");
8369 advertising = (ADVERTISED_100baseT_Half |
8375 if (cmd->duplex != DUPLEX_FULL) {
8376 DP(NETIF_MSG_LINK, "1G half not supported\n");
8380 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8381 DP(NETIF_MSG_LINK, "1G full not supported\n");
8385 advertising = (ADVERTISED_1000baseT_Full |
8390 if (cmd->duplex != DUPLEX_FULL) {
8392 "2.5G half not supported\n");
8396 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8398 "2.5G full not supported\n");
8402 advertising = (ADVERTISED_2500baseX_Full |
8407 if (cmd->duplex != DUPLEX_FULL) {
8408 DP(NETIF_MSG_LINK, "10G half not supported\n");
8412 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8413 DP(NETIF_MSG_LINK, "10G full not supported\n");
8417 advertising = (ADVERTISED_10000baseT_Full |
8422 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8426 bp->link_params.req_line_speed = cmd->speed;
8427 bp->link_params.req_duplex = cmd->duplex;
8428 bp->port.advertising = advertising;
8431 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8432 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8433 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8434 bp->port.advertising);
8436 if (netif_running(dev)) {
8437 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8444 #define PHY_FW_VER_LEN 10
8446 static void bnx2x_get_drvinfo(struct net_device *dev,
8447 struct ethtool_drvinfo *info)
8449 struct bnx2x *bp = netdev_priv(dev);
8450 u8 phy_fw_ver[PHY_FW_VER_LEN];
8452 strcpy(info->driver, DRV_MODULE_NAME);
8453 strcpy(info->version, DRV_MODULE_VERSION);
8455 phy_fw_ver[0] = '\0';
8457 bnx2x_acquire_phy_lock(bp);
8458 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8459 (bp->state != BNX2X_STATE_CLOSED),
8460 phy_fw_ver, PHY_FW_VER_LEN);
8461 bnx2x_release_phy_lock(bp);
8464 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8465 (bp->common.bc_ver & 0xff0000) >> 16,
8466 (bp->common.bc_ver & 0xff00) >> 8,
8467 (bp->common.bc_ver & 0xff),
8468 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8469 strcpy(info->bus_info, pci_name(bp->pdev));
8470 info->n_stats = BNX2X_NUM_STATS;
8471 info->testinfo_len = BNX2X_NUM_TESTS;
8472 info->eedump_len = bp->common.flash_size;
8473 info->regdump_len = 0;
8476 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8478 struct bnx2x *bp = netdev_priv(dev);
8480 if (bp->flags & NO_WOL_FLAG) {
8484 wol->supported = WAKE_MAGIC;
8486 wol->wolopts = WAKE_MAGIC;
8490 memset(&wol->sopass, 0, sizeof(wol->sopass));
8493 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8495 struct bnx2x *bp = netdev_priv(dev);
8497 if (wol->wolopts & ~WAKE_MAGIC)
8500 if (wol->wolopts & WAKE_MAGIC) {
8501 if (bp->flags & NO_WOL_FLAG)
8511 static u32 bnx2x_get_msglevel(struct net_device *dev)
8513 struct bnx2x *bp = netdev_priv(dev);
8515 return bp->msglevel;
8518 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8520 struct bnx2x *bp = netdev_priv(dev);
8522 if (capable(CAP_NET_ADMIN))
8523 bp->msglevel = level;
8526 static int bnx2x_nway_reset(struct net_device *dev)
8528 struct bnx2x *bp = netdev_priv(dev);
8533 if (netif_running(dev)) {
8534 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8541 static int bnx2x_get_eeprom_len(struct net_device *dev)
8543 struct bnx2x *bp = netdev_priv(dev);
8545 return bp->common.flash_size;
8548 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8550 int port = BP_PORT(bp);
8554 /* adjust timeout for emulation/FPGA */
8555 count = NVRAM_TIMEOUT_COUNT;
8556 if (CHIP_REV_IS_SLOW(bp))
8559 /* request access to nvram interface */
8560 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8561 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8563 for (i = 0; i < count*10; i++) {
8564 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8565 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8571 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8572 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8579 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8581 int port = BP_PORT(bp);
8585 /* adjust timeout for emulation/FPGA */
8586 count = NVRAM_TIMEOUT_COUNT;
8587 if (CHIP_REV_IS_SLOW(bp))
8590 /* relinquish nvram interface */
8591 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8592 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8594 for (i = 0; i < count*10; i++) {
8595 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8596 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8602 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8603 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8610 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8614 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8616 /* enable both bits, even on read */
8617 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8618 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8619 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8622 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8626 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8628 /* disable both bits, even after read */
8629 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8630 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8631 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8634 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8640 /* build the command word */
8641 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8643 /* need to clear DONE bit separately */
8644 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8646 /* address of the NVRAM to read from */
8647 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8648 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8650 /* issue a read command */
8651 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8653 /* adjust timeout for emulation/FPGA */
8654 count = NVRAM_TIMEOUT_COUNT;
8655 if (CHIP_REV_IS_SLOW(bp))
8658 /* wait for completion */
8661 for (i = 0; i < count; i++) {
8663 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8665 if (val & MCPR_NVM_COMMAND_DONE) {
8666 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8667 /* we read nvram data in cpu order,
8668 * but ethtool sees it as an array of bytes;
8669 * converting to big-endian will do the work */
8670 *ret_val = cpu_to_be32(val);
8679 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8686 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8688 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8693 if (offset + buf_size > bp->common.flash_size) {
8694 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8695 " buf_size (0x%x) > flash_size (0x%x)\n",
8696 offset, buf_size, bp->common.flash_size);
8700 /* request access to nvram interface */
8701 rc = bnx2x_acquire_nvram_lock(bp);
8705 /* enable access to nvram interface */
8706 bnx2x_enable_nvram_access(bp);
8708 /* read the first word(s) */
8709 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8710 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8711 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8712 memcpy(ret_buf, &val, 4);
8714 /* advance to the next dword */
8715 offset += sizeof(u32);
8716 ret_buf += sizeof(u32);
8717 buf_size -= sizeof(u32);
8722 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8723 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8724 memcpy(ret_buf, &val, 4);
8727 /* disable access to nvram interface */
8728 bnx2x_disable_nvram_access(bp);
8729 bnx2x_release_nvram_lock(bp);
8734 static int bnx2x_get_eeprom(struct net_device *dev,
8735 struct ethtool_eeprom *eeprom, u8 *eebuf)
8737 struct bnx2x *bp = netdev_priv(dev);
8740 if (!netif_running(dev))
8743 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8744 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8745 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8746 eeprom->len, eeprom->len);
8748 /* parameters already validated in ethtool_get_eeprom */
8750 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8755 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8760 /* build the command word */
8761 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8763 /* need to clear DONE bit separately */
8764 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8766 /* write the data */
8767 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8769 /* address of the NVRAM to write to */
8770 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8771 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8773 /* issue the write command */
8774 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8776 /* adjust timeout for emulation/FPGA */
8777 count = NVRAM_TIMEOUT_COUNT;
8778 if (CHIP_REV_IS_SLOW(bp))
8781 /* wait for completion */
8783 for (i = 0; i < count; i++) {
8785 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8786 if (val & MCPR_NVM_COMMAND_DONE) {
8795 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8797 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8805 if (offset + buf_size > bp->common.flash_size) {
8806 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8807 " buf_size (0x%x) > flash_size (0x%x)\n",
8808 offset, buf_size, bp->common.flash_size);
8812 /* request access to nvram interface */
8813 rc = bnx2x_acquire_nvram_lock(bp);
8817 /* enable access to nvram interface */
8818 bnx2x_enable_nvram_access(bp);
8820 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8821 align_offset = (offset & ~0x03);
8822 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8825 val &= ~(0xff << BYTE_OFFSET(offset));
8826 val |= (*data_buf << BYTE_OFFSET(offset));
8828 /* nvram data is returned as an array of bytes
8829 * convert it back to cpu order */
8830 val = be32_to_cpu(val);
8832 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8836 /* disable access to nvram interface */
8837 bnx2x_disable_nvram_access(bp);
8838 bnx2x_release_nvram_lock(bp);
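/* Write a dword-aligned buffer to NVRAM.  Single-byte writes are delegated
 * to bnx2x_nvram_write1() (read-modify-write of the containing dword).
 * The FIRST/LAST command flags are re-issued on NVRAM page boundaries so
 * that every page gets a complete command sequence.
 */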
8843 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8851 if (buf_size == 1) /* ethtool */
8852 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8854 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8856 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8861 if (offset + buf_size > bp->common.flash_size) {
8862 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8863 " buf_size (0x%x) > flash_size (0x%x)\n",
8864 offset, buf_size, bp->common.flash_size);
8868 /* request access to nvram interface */
8869 rc = bnx2x_acquire_nvram_lock(bp);
8873 /* enable access to nvram interface */
8874 bnx2x_enable_nvram_access(bp);
8877 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8878 while ((written_so_far < buf_size) && (rc == 0)) {
8879 if (written_so_far == (buf_size - sizeof(u32)))
8880 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8881 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8882 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8883 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8884 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8886 memcpy(&val, data_buf, 4);
8888 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8890 /* advance to the next dword */
8891 offset += sizeof(u32);
8892 data_buf += sizeof(u32);
8893 written_so_far += sizeof(u32);
8897 /* disable access to nvram interface */
8898 bnx2x_disable_nvram_access(bp);
8899 bnx2x_release_nvram_lock(bp);
8904 static int bnx2x_set_eeprom(struct net_device *dev,
8905 struct ethtool_eeprom *eeprom, u8 *eebuf)
8907 struct bnx2x *bp = netdev_priv(dev);
8910 if (!netif_running(dev))
8913 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8914 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8915 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8916 eeprom->len, eeprom->len);
8918 /* parameters already validated in ethtool_set_eeprom */
8920 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8921 if (eeprom->magic == 0x00504859)
8924 bnx2x_acquire_phy_lock(bp);
8925 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8926 bp->link_params.ext_phy_config,
8927 (bp->state != BNX2X_STATE_CLOSED),
8928 eebuf, eeprom->len);
8929 if ((bp->state == BNX2X_STATE_OPEN) ||
8930 (bp->state == BNX2X_STATE_DISABLED)) {
8931 rc |= bnx2x_link_reset(&bp->link_params,
8933 rc |= bnx2x_phy_init(&bp->link_params,
8936 bnx2x_release_phy_lock(bp);
8938 } else /* Only the PMF can access the PHY */
8941 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8946 static int bnx2x_get_coalesce(struct net_device *dev,
8947 struct ethtool_coalesce *coal)
8949 struct bnx2x *bp = netdev_priv(dev);
8951 memset(coal, 0, sizeof(struct ethtool_coalesce));
8953 coal->rx_coalesce_usecs = bp->rx_ticks;
8954 coal->tx_coalesce_usecs = bp->tx_ticks;
8959 static int bnx2x_set_coalesce(struct net_device *dev,
8960 struct ethtool_coalesce *coal)
8962 struct bnx2x *bp = netdev_priv(dev);
8964 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8965 if (bp->rx_ticks > 3000)
8966 bp->rx_ticks = 3000;
8968 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8969 if (bp->tx_ticks > 0x3000)
8970 bp->tx_ticks = 0x3000;
8972 if (netif_running(dev))
8973 bnx2x_update_coalesce(bp);
8978 static void bnx2x_get_ringparam(struct net_device *dev,
8979 struct ethtool_ringparam *ering)
8981 struct bnx2x *bp = netdev_priv(dev);
8983 ering->rx_max_pending = MAX_RX_AVAIL;
8984 ering->rx_mini_max_pending = 0;
8985 ering->rx_jumbo_max_pending = 0;
8987 ering->rx_pending = bp->rx_ring_size;
8988 ering->rx_mini_pending = 0;
8989 ering->rx_jumbo_pending = 0;
8991 ering->tx_max_pending = MAX_TX_AVAIL;
8992 ering->tx_pending = bp->tx_ring_size;
8995 static int bnx2x_set_ringparam(struct net_device *dev,
8996 struct ethtool_ringparam *ering)
8998 struct bnx2x *bp = netdev_priv(dev);
9001 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9002 (ering->tx_pending > MAX_TX_AVAIL) ||
9003 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9006 bp->rx_ring_size = ering->rx_pending;
9007 bp->tx_ring_size = ering->tx_pending;
9009 if (netif_running(dev)) {
9010 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9011 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9017 static void bnx2x_get_pauseparam(struct net_device *dev,
9018 struct ethtool_pauseparam *epause)
9020 struct bnx2x *bp = netdev_priv(dev);
9022 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9023 BNX2X_FLOW_CTRL_AUTO) &&
9024 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9026 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9027 BNX2X_FLOW_CTRL_RX);
9028 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9029 BNX2X_FLOW_CTRL_TX);
9031 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9032 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9033 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9036 static int bnx2x_set_pauseparam(struct net_device *dev,
9037 struct ethtool_pauseparam *epause)
9039 struct bnx2x *bp = netdev_priv(dev);
9044 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9045 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9046 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9048 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9050 if (epause->rx_pause)
9051 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9053 if (epause->tx_pause)
9054 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9056 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9057 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9059 if (epause->autoneg) {
9060 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9061 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9065 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9066 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9070 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9072 if (netif_running(dev)) {
9073 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9080 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9082 struct bnx2x *bp = netdev_priv(dev);
9086 /* TPA requires Rx CSUM offloading */
9087 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9088 if (!(dev->features & NETIF_F_LRO)) {
9089 dev->features |= NETIF_F_LRO;
9090 bp->flags |= TPA_ENABLE_FLAG;
9094 } else if (dev->features & NETIF_F_LRO) {
9095 dev->features &= ~NETIF_F_LRO;
9096 bp->flags &= ~TPA_ENABLE_FLAG;
9100 if (changed && netif_running(dev)) {
9101 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9102 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9108 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9110 struct bnx2x *bp = netdev_priv(dev);
9115 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9117 struct bnx2x *bp = netdev_priv(dev);
9122 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9123 TPA'ed packets will be discarded due to wrong TCP CSUM */
9125 u32 flags = ethtool_op_get_flags(dev);
9127 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9133 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9136 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9137 dev->features |= NETIF_F_TSO6;
9139 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9140 dev->features &= ~NETIF_F_TSO6;
9146 static const struct {
9147 char string[ETH_GSTRING_LEN];
9148 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9149 { "register_test (offline)" },
9150 { "memory_test (offline)" },
9151 { "loopback_test (offline)" },
9152 { "nvram_test (online)" },
9153 { "interrupt_test (online)" },
9154 { "link_test (online)" },
9155 { "idle check (online)" }
9158 static int bnx2x_self_test_count(struct net_device *dev)
9160 return BNX2X_NUM_TESTS;
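/* Register self test: walk reg_tbl[], write 0x00000000 and then 0xffffffff
 * to each (port-adjusted) register, read it back and compare the masked
 * value; the original register value is restored after each access.
 * Any mismatch fails the test.
 */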
9163 static int bnx2x_test_registers(struct bnx2x *bp)
9165 int idx, i, rc = -ENODEV;
9167 int port = BP_PORT(bp);
9168 static const struct {
9173 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9174 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9175 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9176 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9177 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9178 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9179 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9180 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9181 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9182 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9183 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9184 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9185 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9186 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9187 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9188 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9189 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9190 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9191 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9192 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9193 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9194 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9195 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9196 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9197 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9198 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9199 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9200 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9201 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9202 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9203 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9204 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9205 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9206 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9207 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9208 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9209 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9210 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9212 { 0xffffffff, 0, 0x00000000 }
9215 if (!netif_running(bp->dev))
9218 /* Repeat the test twice:
9219 first by writing 0x00000000, then by writing 0xffffffff */
9220 for (idx = 0; idx < 2; idx++) {
9227 wr_val = 0xffffffff;
9231 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9232 u32 offset, mask, save_val, val;
9234 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9235 mask = reg_tbl[i].mask;
9237 save_val = REG_RD(bp, offset);
9239 REG_WR(bp, offset, wr_val);
9240 val = REG_RD(bp, offset);
9242 /* Restore the original register's value */
9243 REG_WR(bp, offset, save_val);
9245 /* verify that the value read back matches the expected value */
9246 if ((val & mask) != (wr_val & mask))
9257 static int bnx2x_test_memory(struct bnx2x *bp)
9259 int i, j, rc = -ENODEV;
9261 static const struct {
9265 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9266 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9267 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9268 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9269 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9270 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9271 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9275 static const struct {
9281 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9282 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9283 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9284 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9285 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9286 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9288 { NULL, 0xffffffff, 0, 0 }
9291 if (!netif_running(bp->dev))
9294 /* Go through all the memories */
9295 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9296 for (j = 0; j < mem_tbl[i].size; j++)
9297 REG_RD(bp, mem_tbl[i].offset + j*4);
9299 /* Check the parity status */
9300 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9301 val = REG_RD(bp, prty_tbl[i].offset);
9302 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9303 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9305 "%s is 0x%x\n", prty_tbl[i].name, val);
9316 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9321 while (bnx2x_link_test(bp) && cnt--)
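/* Run a single loopback iteration on queue 0: build a test frame (our MAC
 * address plus a counting payload), post it as one Tx BD, ring the doorbell
 * and then verify that the packet completed on both the Tx and Rx sides
 * with the expected length and payload.
 */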
9325 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9327 unsigned int pkt_size, num_pkts, i;
9328 struct sk_buff *skb;
9329 unsigned char *packet;
9330 struct bnx2x_fastpath *fp = &bp->fp[0];
9331 u16 tx_start_idx, tx_idx;
9332 u16 rx_start_idx, rx_idx;
9334 struct sw_tx_bd *tx_buf;
9335 struct eth_tx_bd *tx_bd;
9337 union eth_rx_cqe *cqe;
9339 struct sw_rx_bd *rx_buf;
9343 /* check the loopback mode */
9344 switch (loopback_mode) {
9345 case BNX2X_PHY_LOOPBACK:
9346 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9349 case BNX2X_MAC_LOOPBACK:
9350 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9351 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9357 /* prepare the loopback packet */
9358 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9359 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9360 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9363 goto test_loopback_exit;
9365 packet = skb_put(skb, pkt_size);
9366 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9367 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9368 for (i = ETH_HLEN; i < pkt_size; i++)
9369 packet[i] = (unsigned char) (i & 0xff);
9371 /* send the loopback packet */
9373 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9374 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9376 pkt_prod = fp->tx_pkt_prod++;
9377 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9378 tx_buf->first_bd = fp->tx_bd_prod;
9381 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9382 mapping = pci_map_single(bp->pdev, skb->data,
9383 skb_headlen(skb), PCI_DMA_TODEVICE);
9384 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9385 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9386 tx_bd->nbd = cpu_to_le16(1);
9387 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9388 tx_bd->vlan = cpu_to_le16(pkt_prod);
9389 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9390 ETH_TX_BD_FLAGS_END_BD);
9391 tx_bd->general_data = ((UNICAST_ADDRESS <<
9392 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9396 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9397 mb(); /* FW restriction: must not reorder writing nbd and packets */
9398 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9399 DOORBELL(bp, fp->index, 0);
9405 bp->dev->trans_start = jiffies;
9409 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9410 if (tx_idx != tx_start_idx + num_pkts)
9411 goto test_loopback_exit;
9413 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9414 if (rx_idx != rx_start_idx + num_pkts)
9415 goto test_loopback_exit;
9417 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9418 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9419 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9420 goto test_loopback_rx_exit;
9422 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9423 if (len != pkt_size)
9424 goto test_loopback_rx_exit;
9426 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9428 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9429 for (i = ETH_HLEN; i < pkt_size; i++)
9430 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9431 goto test_loopback_rx_exit;
9435 test_loopback_rx_exit:
9437 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9438 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9439 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9440 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9442 /* Update producers */
9443 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9447 bp->link_params.loopback_mode = LOOPBACK_NONE;
9452 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9456 if (!netif_running(bp->dev))
9457 return BNX2X_LOOPBACK_FAILED;
9459 bnx2x_netif_stop(bp, 1);
9460 bnx2x_acquire_phy_lock(bp);
9462 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9464 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9465 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9468 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9470 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9471 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9474 bnx2x_release_phy_lock(bp);
9475 bnx2x_netif_start(bp);
9480 #define CRC32_RESIDUAL 0xdebb20e3
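/* NVRAM self test: verify the manufacturing magic value and then CRC each
 * region listed in nvram_tbl[].  Each region carries its own CRC32, so
 * running ether_crc_le() over data plus stored CRC must yield the standard
 * CRC32 residual value defined above.
 */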
9482 static int bnx2x_test_nvram(struct bnx2x *bp)
9484 static const struct {
9488 { 0, 0x14 }, /* bootstrap */
9489 { 0x14, 0xec }, /* dir */
9490 { 0x100, 0x350 }, /* manuf_info */
9491 { 0x450, 0xf0 }, /* feature_info */
9492 { 0x640, 0x64 }, /* upgrade_key_info */
9494 { 0x708, 0x70 }, /* manuf_key_info */
9498 __be32 buf[0x350 / 4];
9499 u8 *data = (u8 *)buf;
9503 rc = bnx2x_nvram_read(bp, 0, data, 4);
9505 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9506 goto test_nvram_exit;
9509 magic = be32_to_cpu(buf[0]);
9510 if (magic != 0x669955aa) {
9511 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9513 goto test_nvram_exit;
9516 for (i = 0; nvram_tbl[i].size; i++) {
9518 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9522 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9523 goto test_nvram_exit;
9526 csum = ether_crc_le(nvram_tbl[i].size, data);
9527 if (csum != CRC32_RESIDUAL) {
9529 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9531 goto test_nvram_exit;
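/* Interrupt self test: post a harmless SET_MAC ramrod (header length 0) on
 * the slowpath queue and poll set_mac_pending; if the completion never
 * arrives, the slowpath interrupt path is considered broken.
 */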
9539 static int bnx2x_test_intr(struct bnx2x *bp)
9541 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9544 if (!netif_running(bp->dev))
9547 config->hdr.length = 0;
9549 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9551 config->hdr.offset = BP_FUNC(bp);
9552 config->hdr.client_id = bp->fp->cl_id;
9553 config->hdr.reserved1 = 0;
9555 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9556 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9557 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9559 bp->set_mac_pending++;
9560 for (i = 0; i < 10; i++) {
9561 if (!bp->set_mac_pending)
9563 msleep_interruptible(10);
9572 static void bnx2x_self_test(struct net_device *dev,
9573 struct ethtool_test *etest, u64 *buf)
9575 struct bnx2x *bp = netdev_priv(dev);
9577 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9579 if (!netif_running(dev))
9582 /* offline tests are not supported in MF mode */
9584 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9586 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9589 link_up = bp->link_vars.link_up;
9590 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9591 bnx2x_nic_load(bp, LOAD_DIAG);
9592 /* wait until link state is restored */
9593 bnx2x_wait_for_link(bp, link_up);
9595 if (bnx2x_test_registers(bp) != 0) {
9597 etest->flags |= ETH_TEST_FL_FAILED;
9599 if (bnx2x_test_memory(bp) != 0) {
9601 etest->flags |= ETH_TEST_FL_FAILED;
9603 buf[2] = bnx2x_test_loopback(bp, link_up);
9605 etest->flags |= ETH_TEST_FL_FAILED;
9607 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9608 bnx2x_nic_load(bp, LOAD_NORMAL);
9609 /* wait until link state is restored */
9610 bnx2x_wait_for_link(bp, link_up);
9612 if (bnx2x_test_nvram(bp) != 0) {
9614 etest->flags |= ETH_TEST_FL_FAILED;
9616 if (bnx2x_test_intr(bp) != 0) {
9618 etest->flags |= ETH_TEST_FL_FAILED;
9621 if (bnx2x_link_test(bp) != 0) {
9623 etest->flags |= ETH_TEST_FL_FAILED;
9626 #ifdef BNX2X_EXTRA_DEBUG
9627 bnx2x_panic_dump(bp);
9631 static const struct {
9634 u8 string[ETH_GSTRING_LEN];
9635 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9636 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9637 { Q_STATS_OFFSET32(error_bytes_received_hi),
9638 8, "[%d]: rx_error_bytes" },
9639 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9640 8, "[%d]: rx_ucast_packets" },
9641 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9642 8, "[%d]: rx_mcast_packets" },
9643 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9644 8, "[%d]: rx_bcast_packets" },
9645 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9646 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9647 4, "[%d]: rx_phy_ip_err_discards"},
9648 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9649 4, "[%d]: rx_skb_alloc_discard" },
9650 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9652 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9653 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9654 8, "[%d]: tx_packets" }
9657 static const struct {
9661 #define STATS_FLAGS_PORT 1
9662 #define STATS_FLAGS_FUNC 2
9663 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9664 u8 string[ETH_GSTRING_LEN];
9665 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9666 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9667 8, STATS_FLAGS_BOTH, "rx_bytes" },
9668 { STATS_OFFSET32(error_bytes_received_hi),
9669 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9670 { STATS_OFFSET32(total_unicast_packets_received_hi),
9671 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9672 { STATS_OFFSET32(total_multicast_packets_received_hi),
9673 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9674 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9675 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9676 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9677 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9678 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9679 8, STATS_FLAGS_PORT, "rx_align_errors" },
9680 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9681 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9682 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9683 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9684 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9685 8, STATS_FLAGS_PORT, "rx_fragments" },
9686 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9687 8, STATS_FLAGS_PORT, "rx_jabbers" },
9688 { STATS_OFFSET32(no_buff_discard_hi),
9689 8, STATS_FLAGS_BOTH, "rx_discards" },
9690 { STATS_OFFSET32(mac_filter_discard),
9691 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9692 { STATS_OFFSET32(xxoverflow_discard),
9693 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9694 { STATS_OFFSET32(brb_drop_hi),
9695 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9696 { STATS_OFFSET32(brb_truncate_hi),
9697 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9698 { STATS_OFFSET32(pause_frames_received_hi),
9699 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9700 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9701 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9702 { STATS_OFFSET32(nig_timer_max),
9703 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9704 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9705 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9706 { STATS_OFFSET32(rx_skb_alloc_failed),
9707 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9708 { STATS_OFFSET32(hw_csum_err),
9709 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9711 { STATS_OFFSET32(total_bytes_transmitted_hi),
9712 8, STATS_FLAGS_BOTH, "tx_bytes" },
9713 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9714 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9715 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9716 8, STATS_FLAGS_BOTH, "tx_packets" },
9717 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9718 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9719 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9720 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9721 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9722 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9723 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9724 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9725 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9726 8, STATS_FLAGS_PORT, "tx_deferred" },
9727 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9728 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9729 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9730 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9731 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9732 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9733 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9734 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9735 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9736 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9737 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9738 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9739 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9740 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9741 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9742 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9743 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9744 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9745 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9746 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9747 { STATS_OFFSET32(pause_frames_sent_hi),
9748 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9751 #define IS_PORT_STAT(i) \
9752 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9753 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9754 #define IS_E1HMF_MODE_STAT(bp) \
9755 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
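/* Statistics selection: every Rx/Tx queue exports BNX2X_NUM_Q_STATS
 * counters, followed by the global bnx2x_stats_arr[] counters.  In E1H
 * multi-function mode (unless BNX2X_MSG_STATS is set in msglevel) the
 * port-only statistics are not exposed to ethtool.
 */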
9757 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9759 struct bnx2x *bp = netdev_priv(dev);
9762 switch (stringset) {
9766 for_each_queue(bp, i) {
9767 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9768 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9769 bnx2x_q_stats_arr[j].string, i);
9770 k += BNX2X_NUM_Q_STATS;
9772 if (IS_E1HMF_MODE_STAT(bp))
9774 for (j = 0; j < BNX2X_NUM_STATS; j++)
9775 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9776 bnx2x_stats_arr[j].string);
9778 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9779 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9781 strcpy(buf + j*ETH_GSTRING_LEN,
9782 bnx2x_stats_arr[i].string);
9789 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9794 static int bnx2x_get_stats_count(struct net_device *dev)
9796 struct bnx2x *bp = netdev_priv(dev);
9800 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9801 if (!IS_E1HMF_MODE_STAT(bp))
9802 num_stats += BNX2X_NUM_STATS;
9804 if (IS_E1HMF_MODE_STAT(bp)) {
9806 for (i = 0; i < BNX2X_NUM_STATS; i++)
9807 if (IS_FUNC_STAT(i))
9810 num_stats = BNX2X_NUM_STATS;
9816 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9817 struct ethtool_stats *stats, u64 *buf)
9819 struct bnx2x *bp = netdev_priv(dev);
9820 u32 *hw_stats, *offset;
9825 for_each_queue(bp, i) {
9826 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9827 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9828 if (bnx2x_q_stats_arr[j].size == 0) {
9829 /* skip this counter */
9833 offset = (hw_stats +
9834 bnx2x_q_stats_arr[j].offset);
9835 if (bnx2x_q_stats_arr[j].size == 4) {
9836 /* 4-byte counter */
9837 buf[k + j] = (u64) *offset;
9840 /* 8-byte counter */
9841 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9843 k += BNX2X_NUM_Q_STATS;
9845 if (IS_E1HMF_MODE_STAT(bp))
9847 hw_stats = (u32 *)&bp->eth_stats;
9848 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9849 if (bnx2x_stats_arr[j].size == 0) {
9850 /* skip this counter */
9854 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9855 if (bnx2x_stats_arr[j].size == 4) {
9856 /* 4-byte counter */
9857 buf[k + j] = (u64) *offset;
9860 /* 8-byte counter */
9861 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9864 hw_stats = (u32 *)&bp->eth_stats;
9865 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9866 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9868 if (bnx2x_stats_arr[i].size == 0) {
9869 /* skip this counter */
9874 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9875 if (bnx2x_stats_arr[i].size == 4) {
9876 /* 4-byte counter */
9877 buf[j] = (u64) *offset;
9881 /* 8-byte counter */
9882 buf[j] = HILO_U64(*offset, *(offset + 1));
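/* "ethtool -p": blink the port LED for the requested number of seconds
 * (toggling between operational and off every 500ms) and then restore the
 * LED to match the current link state.
 */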
9888 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9890 struct bnx2x *bp = netdev_priv(dev);
9891 int port = BP_PORT(bp);
9894 if (!netif_running(dev))
9903 for (i = 0; i < (data * 2); i++) {
9905 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9906 bp->link_params.hw_led_mode,
9907 bp->link_params.chip_id);
9909 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9910 bp->link_params.hw_led_mode,
9911 bp->link_params.chip_id);
9913 msleep_interruptible(500);
9914 if (signal_pending(current))
9918 if (bp->link_vars.link_up)
9919 bnx2x_set_led(bp, port, LED_MODE_OPER,
9920 bp->link_vars.line_speed,
9921 bp->link_params.hw_led_mode,
9922 bp->link_params.chip_id);
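/* ethtool entry points; these back the usual userspace commands such as
 * "ethtool -S" (statistics), "ethtool -t" (self test), "ethtool -p"
 * (LED identify), "ethtool -e/-E" (EEPROM/NVRAM access), "ethtool -a/-A"
 * (pause), "ethtool -c/-C" (coalescing) and "ethtool -g/-G" (rings).
 */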
9927 static struct ethtool_ops bnx2x_ethtool_ops = {
9928 .get_settings = bnx2x_get_settings,
9929 .set_settings = bnx2x_set_settings,
9930 .get_drvinfo = bnx2x_get_drvinfo,
9931 .get_wol = bnx2x_get_wol,
9932 .set_wol = bnx2x_set_wol,
9933 .get_msglevel = bnx2x_get_msglevel,
9934 .set_msglevel = bnx2x_set_msglevel,
9935 .nway_reset = bnx2x_nway_reset,
9936 .get_link = ethtool_op_get_link,
9937 .get_eeprom_len = bnx2x_get_eeprom_len,
9938 .get_eeprom = bnx2x_get_eeprom,
9939 .set_eeprom = bnx2x_set_eeprom,
9940 .get_coalesce = bnx2x_get_coalesce,
9941 .set_coalesce = bnx2x_set_coalesce,
9942 .get_ringparam = bnx2x_get_ringparam,
9943 .set_ringparam = bnx2x_set_ringparam,
9944 .get_pauseparam = bnx2x_get_pauseparam,
9945 .set_pauseparam = bnx2x_set_pauseparam,
9946 .get_rx_csum = bnx2x_get_rx_csum,
9947 .set_rx_csum = bnx2x_set_rx_csum,
9948 .get_tx_csum = ethtool_op_get_tx_csum,
9949 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9950 .set_flags = bnx2x_set_flags,
9951 .get_flags = ethtool_op_get_flags,
9952 .get_sg = ethtool_op_get_sg,
9953 .set_sg = ethtool_op_set_sg,
9954 .get_tso = ethtool_op_get_tso,
9955 .set_tso = bnx2x_set_tso,
9956 .self_test_count = bnx2x_self_test_count,
9957 .self_test = bnx2x_self_test,
9958 .get_strings = bnx2x_get_strings,
9959 .phys_id = bnx2x_phys_id,
9960 .get_stats_count = bnx2x_get_stats_count,
9961 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9964 /* end of ethtool_ops */
9966 /****************************************************************************
9967 * General service functions
9968 ****************************************************************************/
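/* Program the PCI power management control register to move the device
 * between D0 and D3hot; a short delay is required when coming out of D3hot
 * before the device can be accessed again, and PME can be enabled so the
 * device may generate wake events while in D3hot.
 */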
9970 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9974 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9978 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9979 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9980 PCI_PM_CTRL_PME_STATUS));
9982 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9983 /* delay required during transition out of D3hot */
9988 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9992 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9994 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9997 /* No more memory access after this point until
9998 * device is brought back to D0.
10008 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10012 /* Tell compiler that status block fields can change */
10014 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10015 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10017 return (fp->rx_comp_cons != rx_cons_sb);
10021 * net_device service functions
10024 static int bnx2x_poll(struct napi_struct *napi, int budget)
10026 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10028 struct bnx2x *bp = fp->bp;
10031 #ifdef BNX2X_STOP_ON_ERROR
10032 if (unlikely(bp->panic))
10036 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10037 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10038 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10040 bnx2x_update_fpsb_idx(fp);
10042 if (bnx2x_has_tx_work(fp))
10045 if (bnx2x_has_rx_work(fp)) {
10046 work_done = bnx2x_rx_int(fp, budget);
10048 /* must not complete if we consumed full budget */
10049 if (work_done >= budget)
10053 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10054 * ensure that status block indices have been actually read
10055 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10056 * so that we won't write the "newer" value of the status block to IGU
10057 * (if there was a DMA right after BNX2X_HAS_WORK and
10058 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10059 * may be postponed to right before bnx2x_ack_sb). In this case
10060 * there will never be another interrupt until there is another update
10061 * of the status block, while there is still unhandled work.
10065 if (!BNX2X_HAS_WORK(fp)) {
10066 #ifdef BNX2X_STOP_ON_ERROR
10069 napi_complete(napi);
10071 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10072 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10073 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10074 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10082 /* we split the first BD into a header BD and a data BD
10083 * to ease the pain of our fellow microcode engineers;
10084 * we use one mapping for both BDs.
10085 * So far this has only been observed to happen
10086 * in Other Operating Systems(TM)
10088 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10089 struct bnx2x_fastpath *fp,
10090 struct eth_tx_bd **tx_bd, u16 hlen,
10091 u16 bd_prod, int nbd)
10093 struct eth_tx_bd *h_tx_bd = *tx_bd;
10094 struct eth_tx_bd *d_tx_bd;
10095 dma_addr_t mapping;
10096 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10098 /* first fix first BD */
10099 h_tx_bd->nbd = cpu_to_le16(nbd);
10100 h_tx_bd->nbytes = cpu_to_le16(hlen);
10102 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10103 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10104 h_tx_bd->addr_lo, h_tx_bd->nbd);
10106 /* now get a new data BD
10107 * (after the pbd) and fill it */
10108 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10109 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10111 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10112 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10114 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10115 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10116 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10118 /* this marks the BD as one that has no individual mapping
10119 * the FW ignores this flag in a BD not marked start
10121 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10122 DP(NETIF_MSG_TX_QUEUED,
10123 "TSO split data size is %d (%x:%x)\n",
10124 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10126 /* update tx_bd for marking the last BD flag */
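/* Fix up a pseudo checksum when the stack's checksum start position is
 * offset from the transport header by 'fix' bytes: subtract (or add back)
 * the partial sum of the bytes in between, then refold and byte-swap the
 * result into the format the parsing BD expects.
 */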
10132 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10135 csum = (u16) ~csum_fold(csum_sub(csum,
10136 csum_partial(t_header - fix, fix, 0)));
10139 csum = (u16) ~csum_fold(csum_add(csum,
10140 csum_partial(t_header, -fix, 0)));
10142 return swab16(csum);
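/* Classify an outgoing skb for the Tx path: returns a bitmask of
 * XMIT_CSUM_* flags (IPv4/IPv6, TCP) based on ip_summed and the network
 * protocol, plus XMIT_GSO_* flags when gso_type requests TSO.
 */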
10145 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10149 if (skb->ip_summed != CHECKSUM_PARTIAL)
10153 if (skb->protocol == htons(ETH_P_IPV6)) {
10155 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10156 rc |= XMIT_CSUM_TCP;
10160 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10161 rc |= XMIT_CSUM_TCP;
10165 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10168 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10174 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10175 /* check if the packet requires linearization (packet is too fragmented);
10176 no need to check fragmentation if page size > 8K (there will be no
10177 violation of FW restrictions) */
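/* The firmware can only fetch a limited number of BDs per packet, so for
 * LSO skbs we slide a window of (MAX_FETCH_BD - 3) fragments across the
 * frag list and require every window to cover at least one MSS worth of
 * data; if any window falls short, the skb must be linearized first.
 * A non-LSO skb with too many fragments is always linearized.
 */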
10178 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10183 int first_bd_sz = 0;
10185 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10186 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10188 if (xmit_type & XMIT_GSO) {
10189 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10190 /* Check if LSO packet needs to be copied:
10191 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10192 int wnd_size = MAX_FETCH_BD - 3;
10193 /* Number of windows to check */
10194 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10199 /* Headers length */
10200 hlen = (int)(skb_transport_header(skb) - skb->data) +
10203 /* Amount of data (w/o headers) on the linear part of the SKB */
10204 first_bd_sz = skb_headlen(skb) - hlen;
10206 wnd_sum = first_bd_sz;
10208 /* Calculate the first sum - it's special */
10209 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10211 skb_shinfo(skb)->frags[frag_idx].size;
10213 /* If there was data on the linear part of the skb - check it */
10214 if (first_bd_sz > 0) {
10215 if (unlikely(wnd_sum < lso_mss)) {
10220 wnd_sum -= first_bd_sz;
10223 /* Others are easier: run through the frag list and
10224 check all windows */
10225 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10227 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10229 if (unlikely(wnd_sum < lso_mss)) {
10234 skb_shinfo(skb)->frags[wnd_idx].size;
10237 /* in non-LSO too fragmented packet should always
10244 if (unlikely(to_copy))
10245 DP(NETIF_MSG_TX_QUEUED,
10246 "Linearization IS REQUIRED for %s packet. "
10247 "num_frags %d hlen %d first_bd_sz %d\n",
10248 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10249 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10255 /* called with netif_tx_lock
10256 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10257 * netif_wake_queue()
10259 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10261 struct bnx2x *bp = netdev_priv(dev);
10262 struct bnx2x_fastpath *fp;
10263 struct netdev_queue *txq;
10264 struct sw_tx_bd *tx_buf;
10265 struct eth_tx_bd *tx_bd;
10266 struct eth_tx_parse_bd *pbd = NULL;
10267 u16 pkt_prod, bd_prod;
10269 dma_addr_t mapping;
10270 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10271 int vlan_off = (bp->e1hov ? 4 : 0);
10275 #ifdef BNX2X_STOP_ON_ERROR
10276 if (unlikely(bp->panic))
10277 return NETDEV_TX_BUSY;
10280 fp_index = skb_get_queue_mapping(skb);
10281 txq = netdev_get_tx_queue(dev, fp_index);
10283 fp = &bp->fp[fp_index];
10285 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10286 fp->eth_q_stats.driver_xoff++,
10287 netif_tx_stop_queue(txq);
10288 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10289 return NETDEV_TX_BUSY;
10292 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10293 " gso type %x xmit_type %x\n",
10294 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10295 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10297 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10298 /* First, check if we need to linearize the skb (due to FW
10299 restrictions). No need to check fragmentation if page size > 8K
10300 (there will be no violation of FW restrictions) */
10301 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10302 /* Statistics of linearization */
10304 if (skb_linearize(skb) != 0) {
10305 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10306 "silently dropping this SKB\n");
10307 dev_kfree_skb_any(skb);
10308 return NETDEV_TX_OK;
10314 Please read carefully. First we use one BD which we mark as start,
10315 then for TSO or xsum we have a parsing info BD,
10316 and only then we have the rest of the TSO BDs.
10317 (don't forget to mark the last one as last,
10318 and to unmap only AFTER you write to the BD ...)
10319 And above all, all pbd sizes are in words - NOT DWORDS!
10322 pkt_prod = fp->tx_pkt_prod++;
10323 bd_prod = TX_BD(fp->tx_bd_prod);
10325 /* get a tx_buf and first BD */
10326 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10327 tx_bd = &fp->tx_desc_ring[bd_prod];
10329 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10330 tx_bd->general_data = (UNICAST_ADDRESS <<
10331 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10333 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10335 /* remember the first BD of the packet */
10336 tx_buf->first_bd = fp->tx_bd_prod;
10339 DP(NETIF_MSG_TX_QUEUED,
10340 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10341 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10344 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10345 (bp->flags & HW_VLAN_TX_FLAG)) {
10346 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10347 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10351 tx_bd->vlan = cpu_to_le16(pkt_prod);
10354 /* turn on parsing and get a BD */
10355 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10356 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10358 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10361 if (xmit_type & XMIT_CSUM) {
10362 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10364 /* for now NS flag is not used in Linux */
10366 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10367 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10369 pbd->ip_hlen = (skb_transport_header(skb) -
10370 skb_network_header(skb)) / 2;
10372 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10374 pbd->total_hlen = cpu_to_le16(hlen);
10375 hlen = hlen*2 - vlan_off;
10377 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10379 if (xmit_type & XMIT_CSUM_V4)
10380 tx_bd->bd_flags.as_bitfield |=
10381 ETH_TX_BD_FLAGS_IP_CSUM;
10383 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10385 if (xmit_type & XMIT_CSUM_TCP) {
10386 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10389 s8 fix = SKB_CS_OFF(skb); /* signed! */
10391 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10392 pbd->cs_offset = fix / 2;
10394 DP(NETIF_MSG_TX_QUEUED,
10395 "hlen %d offset %d fix %d csum before fix %x\n",
10396 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10399 /* HW bug: fixup the CSUM */
10400 pbd->tcp_pseudo_csum =
10401 bnx2x_csum_fix(skb_transport_header(skb),
10404 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10405 pbd->tcp_pseudo_csum);
10409 mapping = pci_map_single(bp->pdev, skb->data,
10410 skb_headlen(skb), PCI_DMA_TODEVICE);
10412 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10413 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10414 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10415 tx_bd->nbd = cpu_to_le16(nbd);
10416 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10418 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10419 " nbytes %d flags %x vlan %x\n",
10420 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10421 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10422 le16_to_cpu(tx_bd->vlan));
10424 if (xmit_type & XMIT_GSO) {
10426 DP(NETIF_MSG_TX_QUEUED,
10427 "TSO packet len %d hlen %d total len %d tso size %d\n",
10428 skb->len, hlen, skb_headlen(skb),
10429 skb_shinfo(skb)->gso_size);
10431 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10433 if (unlikely(skb_headlen(skb) > hlen))
10434 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10437 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10438 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10439 pbd->tcp_flags = pbd_tcp_flags(skb);
10441 if (xmit_type & XMIT_GSO_V4) {
10442 pbd->ip_id = swab16(ip_hdr(skb)->id);
10443 pbd->tcp_pseudo_csum =
10444 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10445 ip_hdr(skb)->daddr,
10446 0, IPPROTO_TCP, 0));
10449 pbd->tcp_pseudo_csum =
10450 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10451 &ipv6_hdr(skb)->daddr,
10452 0, IPPROTO_TCP, 0));
10454 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10457 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10458 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10460 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10461 tx_bd = &fp->tx_desc_ring[bd_prod];
10463 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10464 frag->size, PCI_DMA_TODEVICE);
10466 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10467 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10468 tx_bd->nbytes = cpu_to_le16(frag->size);
10469 tx_bd->vlan = cpu_to_le16(pkt_prod);
10470 tx_bd->bd_flags.as_bitfield = 0;
10472 DP(NETIF_MSG_TX_QUEUED,
10473 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10474 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10475 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10478 /* now at last mark the BD as the last BD */
10479 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10481 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10482 tx_bd, tx_bd->bd_flags.as_bitfield);
10484 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10486 /* now send a tx doorbell, counting the next BD
10487 * if the packet contains or ends with it
10489 if (TX_BD_POFF(bd_prod) < nbd)
10493 DP(NETIF_MSG_TX_QUEUED,
10494 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10495 " tcp_flags %x xsum %x seq %u hlen %u\n",
10496 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10497 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10498 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10500 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10503 * Make sure that the BD data is updated before updating the producer
10504 * since FW might read the BD right after the producer is updated.
10505 * This is only applicable for weak-ordered memory model archs such
10506 * as IA-64. The following barrier is also mandatory since the FW
10507 * assumes packets must have BDs.
10511 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10512 mb(); /* FW restriction: must not reorder writing nbd and packets */
10513 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10514 DOORBELL(bp, fp->index, 0);
10518 fp->tx_bd_prod += nbd;
10519 dev->trans_start = jiffies;
10521 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10522 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10523 if we put Tx into XOFF state. */
10525 netif_tx_stop_queue(txq);
10526 fp->eth_q_stats.driver_xoff++;
10527 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10528 netif_tx_wake_queue(txq);
10532 return NETDEV_TX_OK;
10535 /* called with rtnl_lock */
10536 static int bnx2x_open(struct net_device *dev)
10538 struct bnx2x *bp = netdev_priv(dev);
10540 netif_carrier_off(dev);
10542 bnx2x_set_power_state(bp, PCI_D0);
10544 return bnx2x_nic_load(bp, LOAD_OPEN);
10547 /* called with rtnl_lock */
10548 static int bnx2x_close(struct net_device *dev)
10550 struct bnx2x *bp = netdev_priv(dev);
10552 /* Unload the driver, release IRQs */
10553 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10554 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10555 if (!CHIP_REV_IS_SLOW(bp))
10556 bnx2x_set_power_state(bp, PCI_D3hot);
10561 /* called with netif_tx_lock from dev_mcast.c */
10562 static void bnx2x_set_rx_mode(struct net_device *dev)
10564 struct bnx2x *bp = netdev_priv(dev);
10565 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10566 int port = BP_PORT(bp);
10568 if (bp->state != BNX2X_STATE_OPEN) {
10569 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10573 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10575 if (dev->flags & IFF_PROMISC)
10576 rx_mode = BNX2X_RX_MODE_PROMISC;
10578 else if ((dev->flags & IFF_ALLMULTI) ||
10579 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10580 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10582 else { /* some multicasts */
10583 if (CHIP_IS_E1(bp)) {
10584 int i, old, offset;
10585 struct dev_mc_list *mclist;
10586 struct mac_configuration_cmd *config =
10587 bnx2x_sp(bp, mcast_config);
10589 for (i = 0, mclist = dev->mc_list;
10590 mclist && (i < dev->mc_count);
10591 i++, mclist = mclist->next) {
10593 config->config_table[i].
10594 cam_entry.msb_mac_addr =
10595 swab16(*(u16 *)&mclist->dmi_addr[0]);
10596 config->config_table[i].
10597 cam_entry.middle_mac_addr =
10598 swab16(*(u16 *)&mclist->dmi_addr[2]);
10599 config->config_table[i].
10600 cam_entry.lsb_mac_addr =
10601 swab16(*(u16 *)&mclist->dmi_addr[4]);
10602 config->config_table[i].cam_entry.flags =
10604 config->config_table[i].
10605 target_table_entry.flags = 0;
10606 config->config_table[i].
10607 target_table_entry.client_id = 0;
10608 config->config_table[i].
10609 target_table_entry.vlan_id = 0;
10612 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10613 config->config_table[i].
10614 cam_entry.msb_mac_addr,
10615 config->config_table[i].
10616 cam_entry.middle_mac_addr,
10617 config->config_table[i].
10618 cam_entry.lsb_mac_addr);
10620 old = config->hdr.length;
10622 for (; i < old; i++) {
10623 if (CAM_IS_INVALID(config->
10624 config_table[i])) {
10625 /* already invalidated */
10629 CAM_INVALIDATE(config->
10634 if (CHIP_REV_IS_SLOW(bp))
10635 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10637 offset = BNX2X_MAX_MULTICAST*(1 + port);
10639 config->hdr.length = i;
10640 config->hdr.offset = offset;
10641 config->hdr.client_id = bp->fp->cl_id;
10642 config->hdr.reserved1 = 0;
10644 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10645 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10646 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10649 /* Accept one or more multicasts */
10650 struct dev_mc_list *mclist;
10651 u32 mc_filter[MC_HASH_SIZE];
10652 u32 crc, bit, regidx;
10655 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10657 for (i = 0, mclist = dev->mc_list;
10658 mclist && (i < dev->mc_count);
10659 i++, mclist = mclist->next) {
10661 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10664 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10665 bit = (crc >> 24) & 0xff;
10668 mc_filter[regidx] |= (1 << bit);
10671 for (i = 0; i < MC_HASH_SIZE; i++)
10672 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10677 bp->rx_mode = rx_mode;
10678 bnx2x_set_storm_rx_mode(bp);
10681 /* called with rtnl_lock */
10682 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10684 struct sockaddr *addr = p;
10685 struct bnx2x *bp = netdev_priv(dev);
10687 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10690 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10691 if (netif_running(dev)) {
10692 if (CHIP_IS_E1(bp))
10693 bnx2x_set_mac_addr_e1(bp, 1);
10695 bnx2x_set_mac_addr_e1h(bp, 1);
10701 /* called with rtnl_lock */
10702 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10704 struct mii_ioctl_data *data = if_mii(ifr);
10705 struct bnx2x *bp = netdev_priv(dev);
10706 int port = BP_PORT(bp);
10711 data->phy_id = bp->port.phy_addr;
10715 case SIOCGMIIREG: {
10718 if (!netif_running(dev))
10721 mutex_lock(&bp->port.phy_mutex);
10722 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10723 DEFAULT_PHY_DEV_ADDR,
10724 (data->reg_num & 0x1f), &mii_regval);
10725 data->val_out = mii_regval;
10726 mutex_unlock(&bp->port.phy_mutex);
10731 if (!capable(CAP_NET_ADMIN))
10734 if (!netif_running(dev))
10737 mutex_lock(&bp->port.phy_mutex);
10738 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10739 DEFAULT_PHY_DEV_ADDR,
10740 (data->reg_num & 0x1f), data->val_in);
10741 mutex_unlock(&bp->port.phy_mutex);
10749 return -EOPNOTSUPP;
10752 /* called with rtnl_lock */
10753 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10755 struct bnx2x *bp = netdev_priv(dev);
10758 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10759 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10762 /* This does not race with packet allocation
10763 * because the actual alloc size is
10764 * only updated as part of load
10766 dev->mtu = new_mtu;
10768 if (netif_running(dev)) {
10769 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10770 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10776 static void bnx2x_tx_timeout(struct net_device *dev)
10778 struct bnx2x *bp = netdev_priv(dev);
10780 #ifdef BNX2X_STOP_ON_ERROR
10784 /* This allows the netif to be shut down gracefully before resetting */
10785 schedule_work(&bp->reset_task);
10789 /* called with rtnl_lock */
10790 static void bnx2x_vlan_rx_register(struct net_device *dev,
10791 struct vlan_group *vlgrp)
10793 struct bnx2x *bp = netdev_priv(dev);
10797 /* Set flags according to the required capabilities */
10798 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10800 if (dev->features & NETIF_F_HW_VLAN_TX)
10801 bp->flags |= HW_VLAN_TX_FLAG;
10803 if (dev->features & NETIF_F_HW_VLAN_RX)
10804 bp->flags |= HW_VLAN_RX_FLAG;
10806 if (netif_running(dev))
10807 bnx2x_set_client_config(bp);
10812 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10813 static void poll_bnx2x(struct net_device *dev)
10815 struct bnx2x *bp = netdev_priv(dev);
10817 disable_irq(bp->pdev->irq);
10818 bnx2x_interrupt(bp->pdev->irq, dev);
10819 enable_irq(bp->pdev->irq);
10823 static const struct net_device_ops bnx2x_netdev_ops = {
10824 .ndo_open = bnx2x_open,
10825 .ndo_stop = bnx2x_close,
10826 .ndo_start_xmit = bnx2x_start_xmit,
10827 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10828 .ndo_set_mac_address = bnx2x_change_mac_addr,
10829 .ndo_validate_addr = eth_validate_addr,
10830 .ndo_do_ioctl = bnx2x_ioctl,
10831 .ndo_change_mtu = bnx2x_change_mtu,
10832 .ndo_tx_timeout = bnx2x_tx_timeout,
10834 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10836 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10837 .ndo_poll_controller = poll_bnx2x,
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
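/* Helpers for the probe banner in bnx2x_init_one(): extract the
 * negotiated PCIe link width and speed from the Link Control/Status
 * word read at PCICFG_OFFSET + PCICFG_LINK_CONTROL.
 */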
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
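/* PCI probe entry point: allocate the multi-queue net_device, set up the
 * PCI resources via bnx2x_init_dev(), initialize the per-device state
 * via bnx2x_init_bp(), register the netdev and print the probe banner.
 */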
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
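/* PCI remove entry point: the reverse of bnx2x_init_one() - unregister
 * the netdev, unmap both BARs, free the net_device and release the PCI
 * resources.
 */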
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
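/* Power management hooks.  Suspend unloads the NIC with UNLOAD_CLOSE and
 * drops to the PCI state chosen by the core; resume returns to D0,
 * re-attaches the netdev and reloads with LOAD_OPEN.  Both take
 * rtnl_lock to serialize against other netdev operations.
 */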
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();
	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();
	return rc;
}
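/* Minimal teardown used by the PCI error recovery (EEH/AER) path.  After
 * a bus error the device may no longer respond, so unlike the regular
 * unload path this only quiesces the host side: stop the interface and
 * timers, free IRQs, SKBs, SGEs and driver memory, and (on E1) mark the
 * CAM configuration entries invalid.
 */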
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
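/* Re-discover the shared-memory (MCP) state after a slot reset so that
 * the NO_MCP flag and firmware sequence number are valid again before
 * bnx2x_io_resume() reloads the NIC.
 */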
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
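/* AER/EEH recovery callbacks: detach and unload on error, accept a slot
 * reset, then recover the MCP state and reload the NIC when traffic may
 * flow again.
 */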
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
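/* Module entry points: create the single-threaded workqueue used by the
 * driver's slow-path work before registering the PCI driver, and destroy
 * it only after the driver has been unregistered.
 */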
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);