/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.48.102"
#define DRV_MODULE_RELDATE      "2009/02/12"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
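/*
 * Indirect register access through the GRC window in PCI config space:
 * the target address is latched via PCICFG_GRC_ADDRESS, the data moves
 * through PCICFG_GRC_DATA, and the window is restored to
 * PCICFG_VENDOR_ID_OFFSET so regular config cycles are not disturbed.
 */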
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

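/*
 * DMA a block of len32 dwords from host memory (dma_addr) to device GRC
 * space (dst_addr).  The command is posted to this function's DMAE
 * channel and completion is detected by polling the write-back word
 * until the engine stores DMAE_COMP_VAL there (about 200 tries before
 * declaring a timeout).  Falls back to indirect register writes while
 * the DMAE engine is not yet ready.
 */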
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

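/*
 * Counterpart of bnx2x_write_dmae(): DMA len32 dwords from device GRC
 * space into the slowpath wb_data buffer, polling wb_comp for
 * completion and falling back to indirect reads while the DMAE engine
 * is not yet ready.
 */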
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

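/*
 * Scan the assert list of each storm processor (X/T/C/U): entries are
 * printed until one whose first word holds the "invalid assert" opcode
 * is reached.  Returns the number of valid asserts found.
 */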
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

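/*
 * Dump what is presumably the firmware trace buffer kept in the MCP
 * scratchpad.  The word at offset 0xf104 marks the wrap point of the
 * cyclic buffer, so the contents are printed in two passes: from the
 * mark to the end of the buffer, then from its start back to the mark.
 */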
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

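/*
 * Program the HC CONFIG register of this port for the active interrupt
 * mode: MSI-X (per-vector ISRs), MSI (single ISR) or legacy INTx.  On
 * E1H chips also set the leading/trailing edge masks, enabling NIG and
 * GPIO3 attentions when this function is the PMF.
 */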
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

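/*
 * Acknowledge a status block: build an igu_ack_register image from the
 * sb/storm ids, the new index and the requested operation, and write it
 * as a single dword to this port's HC command register.
 */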
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

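/*
 * TX completion: walk the packet ring from the software consumer to the
 * consumer index in the status block, freeing each completed skb, and
 * wake the netdev queue if it was stopped and enough BDs are now free.
 */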
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

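/*
 * Advance the SGE producer after a TPA completion: clear the mask bits
 * of every SGE consumed by this CQE, then move rx_sge_prod forward over
 * all fully consumed 64-bit mask elements (page-end "next" entries are
 * re-cleared afterwards).
 */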
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

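/*
 * TPA start: the empty skb from aggregation bin 'queue' takes over the
 * producer slot, while the partially filled buffer at the consumer is
 * parked in the bin (still mapped) until the TPA stop completion.
 */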
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

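/*
 * Build the fragmented part of an aggregated skb: attach the SGE pages
 * listed in the CQE to the skb as page fragments, handing each page to
 * the stack and replacing it in the ring with a newly allocated one.
 * The packet is dropped if a replacement allocation fails.
 */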
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
1426          */
1427         wmb();
1428
1429         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1430                 REG_WR(bp, BAR_USTRORM_INTMEM +
1431                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1432                        ((u32 *)&rx_prods)[i]);
1433
1434         mmiowb(); /* keep prod updates ordered */
1435
1436         DP(NETIF_MSG_RX_STATUS,
1437            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1438            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1439 }
1440
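/*
 * Main RX completion handler, called from the NAPI poll loop. Walks
 * the RX completion queue until it catches up with the HW consumer or
 * the budget is exhausted: slowpath CQEs go to bnx2x_sp_event(), TPA
 * start/stop CQEs go to the aggregation code, and regular packets are
 * either copied (small frame with a jumbo MTU) or passed up directly.
 */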
1441 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1442 {
1443         struct bnx2x *bp = fp->bp;
1444         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1445         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1446         int rx_pkt = 0;
1447
1448 #ifdef BNX2X_STOP_ON_ERROR
1449         if (unlikely(bp->panic))
1450                 return 0;
1451 #endif
1452
1453         /* The CQ "next element" is the same size as a regular element,
1454            which is why the plain increment here is OK */
1455         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1456         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1457                 hw_comp_cons++;
1458
1459         bd_cons = fp->rx_bd_cons;
1460         bd_prod = fp->rx_bd_prod;
1461         bd_prod_fw = bd_prod;
1462         sw_comp_cons = fp->rx_comp_cons;
1463         sw_comp_prod = fp->rx_comp_prod;
1464
1465         /* Memory barrier necessary as speculative reads of the rx
1466          * buffer can be ahead of the index in the status block
1467          */
1468         rmb();
1469
1470         DP(NETIF_MSG_RX_STATUS,
1471            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1472            fp->index, hw_comp_cons, sw_comp_cons);
1473
1474         while (sw_comp_cons != hw_comp_cons) {
1475                 struct sw_rx_bd *rx_buf = NULL;
1476                 struct sk_buff *skb;
1477                 union eth_rx_cqe *cqe;
1478                 u8 cqe_fp_flags;
1479                 u16 len, pad;
1480
1481                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1482                 bd_prod = RX_BD(bd_prod);
1483                 bd_cons = RX_BD(bd_cons);
1484
1485                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1486                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1487
1488                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1489                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1490                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1491                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1492                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1493                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1494
1495                 /* is this a slowpath msg? */
1496                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1497                         bnx2x_sp_event(fp, cqe);
1498                         goto next_cqe;
1499
1500                 /* this is an rx packet */
1501                 } else {
1502                         rx_buf = &fp->rx_buf_ring[bd_cons];
1503                         skb = rx_buf->skb;
1504                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1505                         pad = cqe->fast_path_cqe.placement_offset;
1506
1507                         /* If CQE is marked both TPA_START and TPA_END
1508                            it is a non-TPA CQE */
1509                         if ((!fp->disable_tpa) &&
1510                             (TPA_TYPE(cqe_fp_flags) !=
1511                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1512                                 u16 queue = cqe->fast_path_cqe.queue_index;
1513
1514                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1515                                         DP(NETIF_MSG_RX_STATUS,
1516                                            "calling tpa_start on queue %d\n",
1517                                            queue);
1518
1519                                         bnx2x_tpa_start(fp, queue, skb,
1520                                                         bd_cons, bd_prod);
1521                                         goto next_rx;
1522                                 }
1523
1524                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1525                                         DP(NETIF_MSG_RX_STATUS,
1526                                            "calling tpa_stop on queue %d\n",
1527                                            queue);
1528
1529                                         if (!BNX2X_RX_SUM_FIX(cqe))
1530                                                 BNX2X_ERR("STOP on non-TCP "
1531                                                           "data\n");
1532
1533                                         /* This is the size of the linear
1534                                            data on this skb */
1535                                         len = le16_to_cpu(cqe->fast_path_cqe.
1536                                                                 len_on_bd);
1537                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1538                                                     len, cqe, comp_ring_cons);
1539 #ifdef BNX2X_STOP_ON_ERROR
1540                                         if (bp->panic)
1541                                                 return -EINVAL;
1542 #endif
1543
1544                                         bnx2x_update_sge_prod(fp,
1545                                                         &cqe->fast_path_cqe);
1546                                         goto next_cqe;
1547                                 }
1548                         }
1549
1550                         pci_dma_sync_single_for_device(bp->pdev,
1551                                         pci_unmap_addr(rx_buf, mapping),
1552                                                        pad + RX_COPY_THRESH,
1553                                                        PCI_DMA_FROMDEVICE);
1554                         prefetch(skb);
1555                         prefetch(((char *)(skb)) + 128);
1556
1557                         /* is this an error packet? */
1558                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1559                                 DP(NETIF_MSG_RX_ERR,
1560                                    "ERROR  flags %x  rx packet %u\n",
1561                                    cqe_fp_flags, sw_comp_cons);
1562                                 fp->eth_q_stats.rx_err_discard_pkt++;
1563                                 goto reuse_rx;
1564                         }
1565
1566                         /* Since we don't have a jumbo ring,
1567                          * copy small packets if the MTU > 1500
1568                          */
1569                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1570                             (len <= RX_COPY_THRESH)) {
1571                                 struct sk_buff *new_skb;
1572
1573                                 new_skb = netdev_alloc_skb(bp->dev,
1574                                                            len + pad);
1575                                 if (new_skb == NULL) {
1576                                         DP(NETIF_MSG_RX_ERR,
1577                                            "ERROR  packet dropped "
1578                                            "because of alloc failure\n");
1579                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1580                                         goto reuse_rx;
1581                                 }
1582
1583                                 /* aligned copy */
1584                                 skb_copy_from_linear_data_offset(skb, pad,
1585                                                     new_skb->data + pad, len);
1586                                 skb_reserve(new_skb, pad);
1587                                 skb_put(new_skb, len);
1588
1589                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1590
1591                                 skb = new_skb;
1592
1593                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1594                                 pci_unmap_single(bp->pdev,
1595                                         pci_unmap_addr(rx_buf, mapping),
1596                                                  bp->rx_buf_size,
1597                                                  PCI_DMA_FROMDEVICE);
1598                                 skb_reserve(skb, pad);
1599                                 skb_put(skb, len);
1600
1601                         } else {
1602                                 DP(NETIF_MSG_RX_ERR,
1603                                    "ERROR  packet dropped because "
1604                                    "of alloc failure\n");
1605                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1606 reuse_rx:
1607                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1608                                 goto next_rx;
1609                         }
1610
1611                         skb->protocol = eth_type_trans(skb, bp->dev);
1612
1613                         skb->ip_summed = CHECKSUM_NONE;
1614                         if (bp->rx_csum) {
1615                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1616                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1617                                 else
1618                                         fp->eth_q_stats.hw_csum_err++;
1619                         }
1620                 }
1621
1622                 skb_record_rx_queue(skb, fp->index);
1623 #ifdef BCM_VLAN
1624                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1625                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1626                      PARSING_FLAGS_VLAN))
1627                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1628                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1629                 else
1630 #endif
1631                         netif_receive_skb(skb);
1632
1633
1634 next_rx:
1635                 rx_buf->skb = NULL;
1636
1637                 bd_cons = NEXT_RX_IDX(bd_cons);
1638                 bd_prod = NEXT_RX_IDX(bd_prod);
1639                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1640                 rx_pkt++;
1641 next_cqe:
1642                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1643                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1644
1645                 if (rx_pkt == budget)
1646                         break;
1647         } /* while */
1648
1649         fp->rx_bd_cons = bd_cons;
1650         fp->rx_bd_prod = bd_prod_fw;
1651         fp->rx_comp_cons = sw_comp_cons;
1652         fp->rx_comp_prod = sw_comp_prod;
1653
1654         /* Update producers */
1655         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1656                              fp->rx_sge_prod);
1657
1658         fp->rx_pkt += rx_pkt;
1659         fp->rx_calls++;
1660
1661         return rx_pkt;
1662 }
1663
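/*
 * Fastpath MSI-X vector handler: one vector per fastpath queue. The
 * status block is acked with interrupts disabled and the real work is
 * deferred to NAPI.
 */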
1664 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1665 {
1666         struct bnx2x_fastpath *fp = fp_cookie;
1667         struct bnx2x *bp = fp->bp;
1668         int index = fp->index;
1669
1670         /* Return here if interrupt is disabled */
1671         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1672                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1673                 return IRQ_HANDLED;
1674         }
1675
1676         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1677            index, fp->sb_id);
1678         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1679
1680 #ifdef BNX2X_STOP_ON_ERROR
1681         if (unlikely(bp->panic))
1682                 return IRQ_HANDLED;
1683 #endif
1684
1685         prefetch(fp->rx_cons_sb);
1686         prefetch(fp->tx_cons_sb);
1687         prefetch(&fp->status_blk->c_status_block.status_block_index);
1688         prefetch(&fp->status_blk->u_status_block.status_block_index);
1689
1690         napi_schedule(&bnx2x_fp(bp, index, napi));
1691
1692         return IRQ_HANDLED;
1693 }
1694
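/*
 * Legacy INTx/MSI handler: a single vector shared by the fastpath
 * (one status bit per SB) and the slowpath (bit 0), so the acked
 * status word is decoded here and each source is scheduled on its own.
 */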
1695 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1696 {
1697         struct bnx2x *bp = netdev_priv(dev_instance);
1698         u16 status = bnx2x_ack_int(bp);
1699         u16 mask;
1700
1701         /* Return here if interrupt is shared and it's not for us */
1702         if (unlikely(status == 0)) {
1703                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1704                 return IRQ_NONE;
1705         }
1706         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1707
1708         /* Return here if interrupt is disabled */
1709         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1710                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1711                 return IRQ_HANDLED;
1712         }
1713
1714 #ifdef BNX2X_STOP_ON_ERROR
1715         if (unlikely(bp->panic))
1716                 return IRQ_HANDLED;
1717 #endif
1718
1719         mask = 0x2 << bp->fp[0].sb_id;
1720         if (status & mask) {
1721                 struct bnx2x_fastpath *fp = &bp->fp[0];
1722
1723                 prefetch(fp->rx_cons_sb);
1724                 prefetch(fp->tx_cons_sb);
1725                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1726                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1727
1728                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1729
1730                 status &= ~mask;
1731         }
1732
1733
1734         if (unlikely(status & 0x1)) {
1735                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1736
1737                 status &= ~0x1;
1738                 if (!status)
1739                         return IRQ_HANDLED;
1740         }
1741
1742         if (status)
1743                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1744                    status);
1745
1746         return IRQ_HANDLED;
1747 }
1748
1749 /* end of fast path */
1750
1751 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1752
1753 /* Link */
1754
1755 /*
1756  * General service functions
1757  */
1758
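/*
 * HW resource locks arbitrate shared resources (MDIO, GPIO, SPIO, ...)
 * between the driver instances on both ports and the MCP. Functions
 * 0-5 use MISC_REG_DRIVER_CONTROL_1, functions 6-7 use _CONTROL_7;
 * writing the resource bit to the control register + 4 attempts the
 * acquire and reading it back confirms ownership.
 *
 * Illustrative (hypothetical) caller, following the pattern used by
 * the GPIO/SPIO code later in this file:
 *
 *      if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *              ...access MISC_REG_GPIO...
 *              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      }
 */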
1759 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1760 {
1761         u32 lock_status;
1762         u32 resource_bit = (1 << resource);
1763         int func = BP_FUNC(bp);
1764         u32 hw_lock_control_reg;
1765         int cnt;
1766
1767         /* Validating that the resource is within range */
1768         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1769                 DP(NETIF_MSG_HW,
1770                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1771                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1772                 return -EINVAL;
1773         }
1774
1775         if (func <= 5) {
1776                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1777         } else {
1778                 hw_lock_control_reg =
1779                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1780         }
1781
1782         /* Validating that the resource is not already taken */
1783         lock_status = REG_RD(bp, hw_lock_control_reg);
1784         if (lock_status & resource_bit) {
1785                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1786                    lock_status, resource_bit);
1787                 return -EEXIST;
1788         }
1789
1790         /* Try for 5 seconds, polling every 5ms */
1791         for (cnt = 0; cnt < 1000; cnt++) {
1792                 /* Try to acquire the lock */
1793                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1794                 lock_status = REG_RD(bp, hw_lock_control_reg);
1795                 if (lock_status & resource_bit)
1796                         return 0;
1797
1798                 msleep(5);
1799         }
1800         DP(NETIF_MSG_HW, "Timeout\n");
1801         return -EAGAIN;
1802 }
1803
1804 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1805 {
1806         u32 lock_status;
1807         u32 resource_bit = (1 << resource);
1808         int func = BP_FUNC(bp);
1809         u32 hw_lock_control_reg;
1810
1811         /* Validating that the resource is within range */
1812         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1813                 DP(NETIF_MSG_HW,
1814                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1815                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1816                 return -EINVAL;
1817         }
1818
1819         if (func <= 5) {
1820                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1821         } else {
1822                 hw_lock_control_reg =
1823                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1824         }
1825
1826         /* Validating that the resource is currently taken */
1827         lock_status = REG_RD(bp, hw_lock_control_reg);
1828         if (!(lock_status & resource_bit)) {
1829                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1830                    lock_status, resource_bit);
1831                 return -EFAULT;
1832         }
1833
1834         REG_WR(bp, hw_lock_control_reg, resource_bit);
1835         return 0;
1836 }
1837
1838 /* HW Lock for shared dual port PHYs */
1839 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1840 {
1841         mutex_lock(&bp->port.phy_mutex);
1842
1843         if (bp->port.need_hw_lock)
1844                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1845 }
1846
1847 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1848 {
1849         if (bp->port.need_hw_lock)
1850                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1851
1852         mutex_unlock(&bp->port.phy_mutex);
1853 }
1854
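/*
 * GPIO helpers. A pin is addressed by number plus port; the pin moves
 * to the other port's range when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE are set (the gpio_port XOR below). The set
 * routines take the GPIO HW lock around the read-modify-write.
 *
 * Illustrative use (this exact call appears in the fan-failure
 * handling further down):
 *
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */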
1855 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1856 {
1857         /* The GPIO should be swapped if swap register is set and active */
1858         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1859                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1860         int gpio_shift = gpio_num +
1861                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1862         u32 gpio_mask = (1 << gpio_shift);
1863         u32 gpio_reg;
1864         int value;
1865
1866         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1867                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1868                 return -EINVAL;
1869         }
1870
1871         /* read GPIO value */
1872         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1873
1874         /* get the requested pin value */
1875         if ((gpio_reg & gpio_mask) == gpio_mask)
1876                 value = 1;
1877         else
1878                 value = 0;
1879
1880         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1881
1882         return value;
1883 }
1884
1885 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1886 {
1887         /* The GPIO should be swapped if swap register is set and active */
1888         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1889                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1890         int gpio_shift = gpio_num +
1891                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1892         u32 gpio_mask = (1 << gpio_shift);
1893         u32 gpio_reg;
1894
1895         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1896                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1897                 return -EINVAL;
1898         }
1899
1900         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1901         /* read GPIO, masking out everything but the float bits */
1902         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1903
1904         switch (mode) {
1905         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1906                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1907                    gpio_num, gpio_shift);
1908                 /* clear FLOAT and set CLR */
1909                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1910                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1911                 break;
1912
1913         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1914                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1915                    gpio_num, gpio_shift);
1916                 /* clear FLOAT and set SET */
1917                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1918                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1919                 break;
1920
1921         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1922                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1923                    gpio_num, gpio_shift);
1924                 /* set FLOAT */
1925                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1926                 break;
1927
1928         default:
1929                 break;
1930         }
1931
1932         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1933         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1934
1935         return 0;
1936 }
1937
1938 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1939 {
1940         /* The GPIO should be swapped if swap register is set and active */
1941         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1942                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1943         int gpio_shift = gpio_num +
1944                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1945         u32 gpio_mask = (1 << gpio_shift);
1946         u32 gpio_reg;
1947
1948         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1949                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1950                 return -EINVAL;
1951         }
1952
1953         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1954         /* read GPIO int */
1955         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1956
1957         switch (mode) {
1958         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1959                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1960                                    "output low\n", gpio_num, gpio_shift);
1961                 /* clear SET and set CLR */
1962                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1963                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1964                 break;
1965
1966         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1967                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1968                                    "output high\n", gpio_num, gpio_shift);
1969                 /* clear CLR and set SET */
1970                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1971                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1972                 break;
1973
1974         default:
1975                 break;
1976         }
1977
1978         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1979         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1980
1981         return 0;
1982 }
1983
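/*
 * SPIO pins are handled like GPIOs but are not per-port (no port
 * argument, no swap logic); only SPIO 4..7 may be driven from here.
 */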
1984 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1985 {
1986         u32 spio_mask = (1 << spio_num);
1987         u32 spio_reg;
1988
1989         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1990             (spio_num > MISC_REGISTERS_SPIO_7)) {
1991                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1992                 return -EINVAL;
1993         }
1994
1995         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1996         /* read SPIO, masking out everything but the float bits */
1997         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1998
1999         switch (mode) {
2000         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2001                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2002                 /* clear FLOAT and set CLR */
2003                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2004                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2005                 break;
2006
2007         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2008                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2009                 /* clear FLOAT and set SET */
2010                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2011                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2012                 break;
2013
2014         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2015                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2016                 /* set FLOAT */
2017                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2018                 break;
2019
2020         default:
2021                 break;
2022         }
2023
2024         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2025         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2026
2027         return 0;
2028 }
2029
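/*
 * Translate the negotiated IEEE pause bits in link_vars.ieee_fc into
 * the ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause advertising bits.
 */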
2030 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2031 {
2032         switch (bp->link_vars.ieee_fc &
2033                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2034         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2035                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2036                                           ADVERTISED_Pause);
2037                 break;
2038
2039         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2040                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2041                                          ADVERTISED_Pause);
2042                 break;
2043
2044         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2045                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2046                 break;
2047
2048         default:
2049                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2050                                           ADVERTISED_Pause);
2051                 break;
2052         }
2053 }
2054
2055 static void bnx2x_link_report(struct bnx2x *bp)
2056 {
2057         if (bp->link_vars.link_up) {
2058                 if (bp->state == BNX2X_STATE_OPEN)
2059                         netif_carrier_on(bp->dev);
2060                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2061
2062                 printk("%d Mbps ", bp->link_vars.line_speed);
2063
2064                 if (bp->link_vars.duplex == DUPLEX_FULL)
2065                         printk("full duplex");
2066                 else
2067                         printk("half duplex");
2068
2069                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2070                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2071                                 printk(", receive ");
2072                                 if (bp->link_vars.flow_ctrl &
2073                                     BNX2X_FLOW_CTRL_TX)
2074                                         printk("& transmit ");
2075                         } else {
2076                                 printk(", transmit ");
2077                         }
2078                         printk("flow control ON");
2079                 }
2080                 printk("\n");
2081
2082         } else { /* link_down */
2083                 netif_carrier_off(bp->dev);
2084                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2085         }
2086 }
2087
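/*
 * First-time link bring-up. Flow-control auto-advertisement is chosen
 * from the MTU (RX flow control is not advertised for jumbo frames),
 * then the PHY is initialized under the PHY lock. Without bootcode
 * (MCP) the link cannot be initialized at all.
 */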
2088 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2089 {
2090         if (!BP_NOMCP(bp)) {
2091                 u8 rc;
2092
2093                 /* Initialize link parameters structure variables */
2094                 /* It is recommended to turn off RX FC for jumbo frames
2095                    for better performance */
2096                 if (IS_E1HMF(bp))
2097                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2098                 else if (bp->dev->mtu > 5000)
2099                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2100                 else
2101                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2102
2103                 bnx2x_acquire_phy_lock(bp);
2104
2105                 if (load_mode == LOAD_DIAG)
2106                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2107
2108                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2109
2110                 bnx2x_release_phy_lock(bp);
2111
2112                 bnx2x_calc_fc_adv(bp);
2113
2114                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2115                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2116                         bnx2x_link_report(bp);
2117                 }
2118
2119                 return rc;
2120         }
2121         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2122         return -EINVAL;
2123 }
2124
2125 static void bnx2x_link_set(struct bnx2x *bp)
2126 {
2127         if (!BP_NOMCP(bp)) {
2128                 bnx2x_acquire_phy_lock(bp);
2129                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2130                 bnx2x_release_phy_lock(bp);
2131
2132                 bnx2x_calc_fc_adv(bp);
2133         } else
2134                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2135 }
2136
2137 static void bnx2x__link_reset(struct bnx2x *bp)
2138 {
2139         if (!BP_NOMCP(bp)) {
2140                 bnx2x_acquire_phy_lock(bp);
2141                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2142                 bnx2x_release_phy_lock(bp);
2143         } else
2144                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2145 }
2146
2147 static u8 bnx2x_link_test(struct bnx2x *bp)
2148 {
2149         u8 rc;
2150
2151         bnx2x_acquire_phy_lock(bp);
2152         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2153         bnx2x_release_phy_lock(bp);
2154
2155         return rc;
2156 }
2157
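/*
 * Per-port rate-shaping and fairness setup. r_param = line_speed / 8
 * converts Mbps into bytes/usec (e.g. 10000 Mbps -> 1250 bytes/usec),
 * so the thresholds computed below are byte counts per timer period.
 */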
2158 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2159 {
2160         u32 r_param = bp->link_vars.line_speed / 8;
2161         u32 fair_periodic_timeout_usec;
2162         u32 t_fair;
2163
2164         memset(&(bp->cmng.rs_vars), 0,
2165                sizeof(struct rate_shaping_vars_per_port));
2166         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2167
2168         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2169         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2170
2171         /* this is the threshold below which no timer arming will occur -
2172            the 1.25 coefficient makes the threshold a little bigger than
2173            the real time, to compensate for timer inaccuracy */
2174         bp->cmng.rs_vars.rs_threshold =
2175                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2176
2177         /* resolution of fairness timer */
2178         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2179         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2180         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2181
2182         /* this is the threshold below which we won't arm the timer anymore */
2183         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2184
2185         /* we multiply by 1e3/8 to get bytes/msec.
2186            We don't want the credit to exceed
2187            t_fair*FAIR_MEM (the algorithm resolution) */
2188         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2189         /* since each tick is 4 usec */
2190         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2191 }
2192
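/*
 * Per-VN (virtual NIC) min/max rate setup for E1H multi-function
 * mode. The MIN/MAX_BW fields in the shared MF configuration are in
 * units of 100 Mbps; the results are written word-by-word into
 * xstorm internal memory.
 */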
2193 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2194 {
2195         struct rate_shaping_vars_per_vn m_rs_vn;
2196         struct fairness_vars_per_vn m_fair_vn;
2197         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2198         u16 vn_min_rate, vn_max_rate;
2199         int i;
2200
2201         /* If function is hidden - set min and max to zeroes */
2202         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2203                 vn_min_rate = 0;
2204                 vn_max_rate = 0;
2205
2206         } else {
2207                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2208                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2209                 /* If fairness is enabled (not all min rates are zero) and
2210                    the current min rate is zero - set it to the default
2211                    minimum. This is a requirement of the algorithm. */
2212                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2213                         vn_min_rate = DEF_MIN_RATE;
2214                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2215                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2216         }
2217
2218         DP(NETIF_MSG_IFUP,
2219            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2220            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2221
2222         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2223         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2224
2225         /* global vn counter - maximal Mbps for this vn */
2226         m_rs_vn.vn_counter.rate = vn_max_rate;
2227
2228         /* quota - number of bytes transmitted in this period */
2229         m_rs_vn.vn_counter.quota =
2230                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2231
2232         if (bp->vn_weight_sum) {
2233                 /* credit for each period of the fairness algorithm:
2234                    number of bytes in T_FAIR (the vns share the port rate).
2235                    vn_weight_sum should not be larger than 10000, thus
2236                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2237                    than zero */
2238                 m_fair_vn.vn_credit_delta =
2239                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2240                                                  (8 * bp->vn_weight_sum))),
2241                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2242                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2243                    m_fair_vn.vn_credit_delta);
2244         }
2245
2246         /* Store it to internal memory */
2247         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2248                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2250                        ((u32 *)(&m_rs_vn))[i]);
2251
2252         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2253                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2254                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2255                        ((u32 *)(&m_fair_vn))[i]);
2256 }
2257
2258
2259 /* This function is called upon link interrupt */
2260 static void bnx2x_link_attn(struct bnx2x *bp)
2261 {
2262         /* Make sure that we are synced with the current statistics */
2263         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2264
2265         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2266
2267         if (bp->link_vars.link_up) {
2268
2269                 /* dropless flow control */
2270                 if (CHIP_IS_E1H(bp)) {
2271                         int port = BP_PORT(bp);
2272                         u32 pause_enabled = 0;
2273
2274                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2275                                 pause_enabled = 1;
2276
2277                         REG_WR(bp, BAR_USTRORM_INTMEM +
2278                                USTORM_PAUSE_ENABLED_OFFSET(port),
2279                                pause_enabled);
2280                 }
2281
2282                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2283                         struct host_port_stats *pstats;
2284
2285                         pstats = bnx2x_sp(bp, port_stats);
2286                         /* reset old bmac stats */
2287                         memset(&(pstats->mac_stx[0]), 0,
2288                                sizeof(struct mac_stx));
2289                 }
2290                 if ((bp->state == BNX2X_STATE_OPEN) ||
2291                     (bp->state == BNX2X_STATE_DISABLED))
2292                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2293         }
2294
2295         /* indicate link status */
2296         bnx2x_link_report(bp);
2297
2298         if (IS_E1HMF(bp)) {
2299                 int port = BP_PORT(bp);
2300                 int func;
2301                 int vn;
2302
2303                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2304                         if (vn == BP_E1HVN(bp))
2305                                 continue;
2306
2307                         func = ((vn << 1) | port);
2308
2309                         /* Set the attention towards other drivers
2310                            on the same port */
2311                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2312                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2313                 }
2314
2315                 if (bp->link_vars.link_up) {
2316                         int i;
2317
2318                         /* Init rate shaping and fairness contexts */
2319                         bnx2x_init_port_minmax(bp);
2320
2321                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2322                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2323
2324                         /* Store it to internal memory */
2325                         for (i = 0;
2326                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2327                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2328                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2329                                        ((u32 *)(&bp->cmng))[i]);
2330                 }
2331         }
2332 }
2333
2334 static void bnx2x__link_status_update(struct bnx2x *bp)
2335 {
2336         if (bp->state != BNX2X_STATE_OPEN)
2337                 return;
2338
2339         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2340
2341         if (bp->link_vars.link_up)
2342                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2343         else
2344                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2345
2346         /* indicate link status */
2347         bnx2x_link_report(bp);
2348 }
2349
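/*
 * This driver instance has just become the PMF (port management
 * function): enable the NIG attention for this function's VN and
 * hand the statistics machinery a PMF event.
 */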
2350 static void bnx2x_pmf_update(struct bnx2x *bp)
2351 {
2352         int port = BP_PORT(bp);
2353         u32 val;
2354
2355         bp->port.pmf = 1;
2356         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2357
2358         /* enable nig attention */
2359         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2360         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2361         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2362
2363         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2364 }
2365
2366 /* end of Link */
2367
2368 /* slow path */
2369
2370 /*
2371  * General service functions
2372  */
2373
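/*
 * Post one slow-path element (ramrod) on the SPQ and notify the chip
 * by writing the new producer index to xstorm internal memory.
 */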
2374 /* the slow path queue is odd since completions arrive on the fastpath ring */
2375 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2376                          u32 data_hi, u32 data_lo, int common)
2377 {
2378         int func = BP_FUNC(bp);
2379
2380         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2381            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2382            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2383            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2384            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2385
2386 #ifdef BNX2X_STOP_ON_ERROR
2387         if (unlikely(bp->panic))
2388                 return -EIO;
2389 #endif
2390
2391         spin_lock_bh(&bp->spq_lock);
2392
2393         if (!bp->spq_left) {
2394                 BNX2X_ERR("BUG! SPQ ring full!\n");
2395                 spin_unlock_bh(&bp->spq_lock);
2396                 bnx2x_panic();
2397                 return -EBUSY;
2398         }
2399
2400         /* CID needs the port number to be encoded in it */
2401         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2402                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2403                                      HW_CID(bp, cid)));
2404         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2405         if (common)
2406                 bp->spq_prod_bd->hdr.type |=
2407                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2408
2409         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2410         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2411
2412         bp->spq_left--;
2413
2414         if (bp->spq_prod_bd == bp->spq_last_bd) {
2415                 bp->spq_prod_bd = bp->spq;
2416                 bp->spq_prod_idx = 0;
2417                 DP(NETIF_MSG_TIMER, "end of spq\n");
2418
2419         } else {
2420                 bp->spq_prod_bd++;
2421                 bp->spq_prod_idx++;
2422         }
2423
2424         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2425                bp->spq_prod_idx);
2426
2427         spin_unlock_bh(&bp->spq_lock);
2428         return 0;
2429 }
2430
2431 /* acquire split MCP access lock register */
2432 static int bnx2x_acquire_alr(struct bnx2x *bp)
2433 {
2434         u32 i, j, val;
2435         int rc = 0;
2436
2437         might_sleep();
2438         i = 100;
2439         for (j = 0; j < i*10; j++) {
2440                 val = (1UL << 31);
2441                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2442                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2443                 if (val & (1L << 31))
2444                         break;
2445
2446                 msleep(5);
2447         }
2448         if (!(val & (1L << 31))) {
2449                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2450                 rc = -EBUSY;
2451         }
2452
2453         return rc;
2454 }
2455
2456 /* release split MCP access lock register */
2457 static void bnx2x_release_alr(struct bnx2x *bp)
2458 {
2459         u32 val = 0;
2460
2461         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2462 }
2463
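/*
 * Compare the default status block indices against the cached copies
 * and return a bitmask of what changed: 1 - attention bits, 2 - CSTORM,
 * 4 - USTORM, 8 - XSTORM, 16 - TSTORM.
 */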
2464 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2465 {
2466         struct host_def_status_block *def_sb = bp->def_status_blk;
2467         u16 rc = 0;
2468
2469         barrier(); /* status block is written to by the chip */
2470         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2471                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2472                 rc |= 1;
2473         }
2474         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2475                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2476                 rc |= 2;
2477         }
2478         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2479                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2480                 rc |= 4;
2481         }
2482         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2483                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2484                 rc |= 8;
2485         }
2486         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2487                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2488                 rc |= 16;
2489         }
2490         return rc;
2491 }
2492
2493 /*
2494  * slow path service functions
2495  */
2496
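/*
 * Handle newly asserted attention bits: mask them in the AEU (under
 * the per-port attention HW lock), record them in attn_state, service
 * the hard-wired sources (NIG/link, GPIOs, general attentions) and
 * finally write the bits to the HC attn-bits-set register.
 */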
2497 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2498 {
2499         int port = BP_PORT(bp);
2500         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2501                        COMMAND_REG_ATTN_BITS_SET);
2502         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2503                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2504         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2505                                        NIG_REG_MASK_INTERRUPT_PORT0;
2506         u32 aeu_mask;
2507         u32 nig_mask = 0;
2508
2509         if (bp->attn_state & asserted)
2510                 BNX2X_ERR("IGU ERROR\n");
2511
2512         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2513         aeu_mask = REG_RD(bp, aeu_addr);
2514
2515         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2516            aeu_mask, asserted);
2517         aeu_mask &= ~(asserted & 0xff);
2518         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2519
2520         REG_WR(bp, aeu_addr, aeu_mask);
2521         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2522
2523         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2524         bp->attn_state |= asserted;
2525         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2526
2527         if (asserted & ATTN_HARD_WIRED_MASK) {
2528                 if (asserted & ATTN_NIG_FOR_FUNC) {
2529
2530                         bnx2x_acquire_phy_lock(bp);
2531
2532                         /* save nig interrupt mask */
2533                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2534                         REG_WR(bp, nig_int_mask_addr, 0);
2535
2536                         bnx2x_link_attn(bp);
2537
2538                         /* handle unicore attn? */
2539                 }
2540                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2541                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2542
2543                 if (asserted & GPIO_2_FUNC)
2544                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2545
2546                 if (asserted & GPIO_3_FUNC)
2547                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2548
2549                 if (asserted & GPIO_4_FUNC)
2550                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2551
2552                 if (port == 0) {
2553                         if (asserted & ATTN_GENERAL_ATTN_1) {
2554                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2555                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2556                         }
2557                         if (asserted & ATTN_GENERAL_ATTN_2) {
2558                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2559                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2560                         }
2561                         if (asserted & ATTN_GENERAL_ATTN_3) {
2562                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2563                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2564                         }
2565                 } else {
2566                         if (asserted & ATTN_GENERAL_ATTN_4) {
2567                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2568                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2569                         }
2570                         if (asserted & ATTN_GENERAL_ATTN_5) {
2571                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2572                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2573                         }
2574                         if (asserted & ATTN_GENERAL_ATTN_6) {
2575                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2576                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2577                         }
2578                 }
2579
2580         } /* if hardwired */
2581
2582         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2583            asserted, hc_addr);
2584         REG_WR(bp, hc_addr, asserted);
2585
2586         /* now set back the mask */
2587         if (asserted & ATTN_NIG_FOR_FUNC) {
2588                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2589                 bnx2x_release_phy_lock(bp);
2590         }
2591 }
2592
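/*
 * The deasserted0..3 helpers below each decode one AEU after-invert
 * attention register; any bit from HW_INTERRUT_ASSERT_SET_* is
 * treated as a fatal HW block attention.
 */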
2593 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2594 {
2595         int port = BP_PORT(bp);
2596         int reg_offset;
2597         u32 val;
2598
2599         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2600                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2601
2602         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2603
2604                 val = REG_RD(bp, reg_offset);
2605                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2606                 REG_WR(bp, reg_offset, val);
2607
2608                 BNX2X_ERR("SPIO5 hw attention\n");
2609
2610                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2611                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2612                         /* Fan failure attention */
2613
2614                         /* The PHY reset is controlled by GPIO 1 */
2615                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2616                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2617                         /* Low power mode is controlled by GPIO 2 */
2618                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2619                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2620                         /* mark the failure */
2621                         bp->link_params.ext_phy_config &=
2622                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2623                         bp->link_params.ext_phy_config |=
2624                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2625                         SHMEM_WR(bp,
2626                                  dev_info.port_hw_config[port].
2627                                                         external_phy_config,
2628                                  bp->link_params.ext_phy_config);
2629                         /* log the failure */
2630                         printk(KERN_ERR PFX "Fan Failure on Network"
2631                                " Controller %s has caused the driver to"
2632                                " shutdown the card to prevent permanent"
2633                                " damage.  Please contact Dell Support for"
2634                                " assistance\n", bp->dev->name);
2635                         break;
2636
2637                 default:
2638                         break;
2639                 }
2640         }
2641
2642         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2643                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2644                 bnx2x_acquire_phy_lock(bp);
2645                 bnx2x_handle_module_detect_int(&bp->link_params);
2646                 bnx2x_release_phy_lock(bp);
2647         }
2648
2649         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2650
2651                 val = REG_RD(bp, reg_offset);
2652                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2653                 REG_WR(bp, reg_offset, val);
2654
2655                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2656                           (attn & HW_INTERRUT_ASSERT_SET_0));
2657                 bnx2x_panic();
2658         }
2659 }
2660
2661 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2662 {
2663         u32 val;
2664
2665         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2666
2667                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2668                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2669                 /* DORQ discard attention */
2670                 if (val & 0x2)
2671                         BNX2X_ERR("FATAL error from DORQ\n");
2672         }
2673
2674         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2675
2676                 int port = BP_PORT(bp);
2677                 int reg_offset;
2678
2679                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2680                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2681
2682                 val = REG_RD(bp, reg_offset);
2683                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2684                 REG_WR(bp, reg_offset, val);
2685
2686                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2687                           (attn & HW_INTERRUT_ASSERT_SET_1));
2688                 bnx2x_panic();
2689         }
2690 }
2691
2692 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2693 {
2694         u32 val;
2695
2696         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2697
2698                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2699                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2700                 /* CFC error attention */
2701                 if (val & 0x2)
2702                         BNX2X_ERR("FATAL error from CFC\n");
2703         }
2704
2705         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2706
2707                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2708                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2709                 /* RQ_USDMDP_FIFO_OVERFLOW */
2710                 if (val & 0x18000)
2711                         BNX2X_ERR("FATAL error from PXP\n");
2712         }
2713
2714         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2715
2716                 int port = BP_PORT(bp);
2717                 int reg_offset;
2718
2719                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2720                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2721
2722                 val = REG_RD(bp, reg_offset);
2723                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2724                 REG_WR(bp, reg_offset, val);
2725
2726                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2727                           (attn & HW_INTERRUT_ASSERT_SET_2));
2728                 bnx2x_panic();
2729         }
2730 }
2731
2732 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2733 {
2734         u32 val;
2735
2736         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2737
2738                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2739                         int func = BP_FUNC(bp);
2740
2741                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2742                         bnx2x__link_status_update(bp);
2743                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2744                                                         DRV_STATUS_PMF)
2745                                 bnx2x_pmf_update(bp);
2746
2747                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2748
2749                         BNX2X_ERR("MC assert!\n");
2750                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2751                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2752                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2753                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2754                         bnx2x_panic();
2755
2756                 } else if (attn & BNX2X_MCP_ASSERT) {
2757
2758                         BNX2X_ERR("MCP assert!\n");
2759                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2760                         bnx2x_fw_dump(bp);
2761
2762                 } else
2763                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2764         }
2765
2766         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2767                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2768                 if (attn & BNX2X_GRC_TIMEOUT) {
2769                         val = CHIP_IS_E1H(bp) ?
2770                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2771                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2772                 }
2773                 if (attn & BNX2X_GRC_RSV) {
2774                         val = CHIP_IS_E1H(bp) ?
2775                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2776                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2777                 }
2778                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2779         }
2780 }
2781
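/*
 * Handle deasserted attention bits: under the ALR (since the MCP or
 * the other port may race on the same event), read the after-invert
 * attention signals, run every attention group that deasserted
 * through the helpers above, then unmask the bits in the AEU and
 * clear them from attn_state.
 */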
2782 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2783 {
2784         struct attn_route attn;
2785         struct attn_route group_mask;
2786         int port = BP_PORT(bp);
2787         int index;
2788         u32 reg_addr;
2789         u32 val;
2790         u32 aeu_mask;
2791
2792         /* need to take the HW lock because the MCP or the other port
2793            might also try to handle this event */
2794         bnx2x_acquire_alr(bp);
2795
2796         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2797         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2798         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2799         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2800         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2801            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2802
2803         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2804                 if (deasserted & (1 << index)) {
2805                         group_mask = bp->attn_group[index];
2806
2807                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2808                            index, group_mask.sig[0], group_mask.sig[1],
2809                            group_mask.sig[2], group_mask.sig[3]);
2810
2811                         bnx2x_attn_int_deasserted3(bp,
2812                                         attn.sig[3] & group_mask.sig[3]);
2813                         bnx2x_attn_int_deasserted1(bp,
2814                                         attn.sig[1] & group_mask.sig[1]);
2815                         bnx2x_attn_int_deasserted2(bp,
2816                                         attn.sig[2] & group_mask.sig[2]);
2817                         bnx2x_attn_int_deasserted0(bp,
2818                                         attn.sig[0] & group_mask.sig[0]);
2819
2820                         if ((attn.sig[0] & group_mask.sig[0] &
2821                                                 HW_PRTY_ASSERT_SET_0) ||
2822                             (attn.sig[1] & group_mask.sig[1] &
2823                                                 HW_PRTY_ASSERT_SET_1) ||
2824                             (attn.sig[2] & group_mask.sig[2] &
2825                                                 HW_PRTY_ASSERT_SET_2))
2826                                 BNX2X_ERR("FATAL HW block parity attention\n");
2827                 }
2828         }
2829
2830         bnx2x_release_alr(bp);
2831
2832         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2833
2834         val = ~deasserted;
2835         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2836            val, reg_addr);
2837         REG_WR(bp, reg_addr, val);
2838
2839         if (~bp->attn_state & deasserted)
2840                 BNX2X_ERR("IGU ERROR\n");
2841
2842         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2843                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2844
2845         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2846         aeu_mask = REG_RD(bp, reg_addr);
2847
2848         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2849            aeu_mask, deasserted);
2850         aeu_mask |= (deasserted & 0xff);
2851         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2852
2853         REG_WR(bp, reg_addr, aeu_mask);
2854         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855
2856         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2857         bp->attn_state &= ~deasserted;
2858         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2859 }
2860
2861 static void bnx2x_attn_int(struct bnx2x *bp)
2862 {
2863         /* read local copy of bits */
2864         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2865                                                                 attn_bits);
2866         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2867                                                                 attn_bits_ack);
2868         u32 attn_state = bp->attn_state;
2869
2870         /* look for changed bits */
2871         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2872         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2873
2874         DP(NETIF_MSG_HW,
2875            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2876            attn_bits, attn_ack, asserted, deasserted);
2877
2878         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2879                 BNX2X_ERR("BAD attention state\n");
2880
2881         /* handle bits that were raised */
2882         if (asserted)
2883                 bnx2x_attn_int_asserted(bp, asserted);
2884
2885         if (deasserted)
2886                 bnx2x_attn_int_deasserted(bp, deasserted);
2887 }
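
/* Editor's note: an illustrative sketch, not part of the driver.
 * bnx2x_attn_int() above derives edge events from three bitmaps:
 * the attention lines currently raised (attn_bits), the lines
 * already acknowledged (attn_ack) and the driver's cached view
 * (attn_state).  A line is newly asserted when it is set only in
 * attn_bits, and newly deasserted when it is clear in attn_bits but
 * set in both attn_ack and attn_state.  For example:
 */
#if 0
        u32 attn_bits  = 0x5;   /* lines 0 and 2 raised */
        u32 attn_ack   = 0x2;   /* line 1 was acked earlier */
        u32 attn_state = 0x2;   /* driver thinks line 1 is up */

        u32 asserted   =  attn_bits & ~attn_ack & ~attn_state; /* 0x5 */
        u32 deasserted = ~attn_bits &  attn_ack &  attn_state; /* 0x2 */
#endif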
2888
2889 static void bnx2x_sp_task(struct work_struct *work)
2890 {
2891         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2892         u16 status;
2893
2894
2895         /* Return here if interrupt is disabled */
2896         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2897                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2898                 return;
2899         }
2900
2901         status = bnx2x_update_dsb_idx(bp);
2902 /*      if (status == 0)                                     */
2903 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2904
2905         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2906
2907         /* HW attentions */
2908         if (status & 0x1)
2909                 bnx2x_attn_int(bp);
2910
2911         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2912                      IGU_INT_NOP, 1);
2913         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2914                      IGU_INT_NOP, 1);
2915         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2916                      IGU_INT_NOP, 1);
2917         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2918                      IGU_INT_NOP, 1);
2919         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2920                      IGU_INT_ENABLE, 1);
2921
2922 }
2923
2924 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2925 {
2926         struct net_device *dev = dev_instance;
2927         struct bnx2x *bp = netdev_priv(dev);
2928
2929         /* Return here if interrupt is disabled */
2930         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2931                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2932                 return IRQ_HANDLED;
2933         }
2934
2935         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2936
2937 #ifdef BNX2X_STOP_ON_ERROR
2938         if (unlikely(bp->panic))
2939                 return IRQ_HANDLED;
2940 #endif
2941
2942         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2943
2944         return IRQ_HANDLED;
2945 }
2946
2947 /* end of slow path */
2948
2949 /* Statistics */
2950
2951 /****************************************************************************
2952 * Macros
2953 ****************************************************************************/
2954
2955 /* sum[hi:lo] += add[hi:lo] */
2956 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2957         do { \
2958                 s_lo += a_lo; \
2959                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2960         } while (0)
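
/* Editor's note: an illustrative sketch, not part of the driver.
 * ADD_64() adds two 64-bit values that are each split across a
 * {hi, lo} pair of u32 words: the low words are summed first and,
 * if the sum wrapped (s_lo ends up smaller than a_lo), a carry of
 * one is propagated into the high word.  For example:
 */
#if 0
        u32 s_hi = 0x00000001, s_lo = 0xffffffff;

        ADD_64(s_hi, 0, s_lo, 2);
        /* s_lo wraps to 0x00000001 (< 2), so s_hi carries to 2:
           0x1ffffffff + 2 == 0x200000001 */
#endif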
2961
2962 /* difference = minuend - subtrahend */
2963 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2964         do { \
2965                 if (m_lo < s_lo) { \
2966                         /* underflow */ \
2967                         d_hi = m_hi - s_hi; \
2968                         if (d_hi > 0) { \
2969                                 /* we can 'loan' 1 */ \
2970                                 d_hi--; \
2971                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2972                         } else { \
2973                                 /* m_hi <= s_hi */ \
2974                                 d_hi = 0; \
2975                                 d_lo = 0; \
2976                         } \
2977                 } else { \
2978                         /* m_lo >= s_lo */ \
2979                         if (m_hi < s_hi) { \
2980                                 d_hi = 0; \
2981                                 d_lo = 0; \
2982                         } else { \
2983                                 /* m_hi >= s_hi */ \
2984                                 d_hi = m_hi - s_hi; \
2985                                 d_lo = m_lo - s_lo; \
2986                         } \
2987                 } \
2988         } while (0)
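
/* Editor's note: an illustrative sketch, not part of the driver.
 * DIFF_64() subtracts split 64-bit values: when the minuend's low
 * word is smaller, one is borrowed from the high word, and a result
 * that would be negative (counters are assumed monotonic) is
 * clamped to zero.  For example:
 */
#if 0
        u32 d_hi, d_lo;

        /* 0x200000001 - 0x100000003: the low word borrows */
        DIFF_64(d_hi, 2, 1, d_lo, 1, 3);
        /* d_hi == 0, d_lo == 0xfffffffe */
#endif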
2989
2990 #define UPDATE_STAT64(s, t) \
2991         do { \
2992                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2993                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2994                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2995                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2996                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2997                        pstats->mac_stx[1].t##_lo, diff.lo); \
2998         } while (0)
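
/* Editor's note: in UPDATE_STAT64() above, mac_stx[0] caches the
 * previous raw MAC snapshot while mac_stx[1] holds the running
 * total: the delta between the new snapshot and mac_stx[0] is
 * computed, mac_stx[0] is refreshed, and the delta is folded into
 * mac_stx[1].  Since DIFF_64() clamps a negative delta to zero, the
 * totals stay monotonic even if a snapshot goes backwards. */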
2999
3000 #define UPDATE_STAT64_NIG(s, t) \
3001         do { \
3002                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3003                         diff.lo, new->s##_lo, old->s##_lo); \
3004                 ADD_64(estats->t##_hi, diff.hi, \
3005                        estats->t##_lo, diff.lo); \
3006         } while (0)
3007
3008 /* sum[hi:lo] += add */
3009 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3010         do { \
3011                 s_lo += a; \
3012                 s_hi += (s_lo < a) ? 1 : 0; \
3013         } while (0)
3014
3015 #define UPDATE_EXTEND_STAT(s) \
3016         do { \
3017                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3018                               pstats->mac_stx[1].s##_lo, \
3019                               new->s); \
3020         } while (0)
3021
3022 #define UPDATE_EXTEND_TSTAT(s, t) \
3023         do { \
3024                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3025                 old_tclient->s = tclient->s; \
3026                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3027         } while (0)
3028
3029 #define UPDATE_EXTEND_USTAT(s, t) \
3030         do { \
3031                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3032                 old_uclient->s = uclient->s; \
3033                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3034         } while (0)
3035
3036 #define UPDATE_EXTEND_XSTAT(s, t) \
3037         do { \
3038                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3039                 old_xclient->s = xclient->s; \
3040                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3041         } while (0)
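
/* Editor's note: the UPDATE_EXTEND_*STAT() helpers above extend
 * free-running 32-bit firmware counters into 64-bit totals.  The
 * per-poll delta uses plain u32 subtraction, which remains correct
 * across counter wraparound as long as fewer than 2^32 events occur
 * between polls; the delta is then accumulated with
 * ADD_EXTEND_64(). */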
3042
3043 /* minuend -= subtrahend */
3044 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3045         do { \
3046                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3047         } while (0)
3048
3049 /* minuend[hi:lo] -= subtrahend */
3050 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3051         do { \
3052                 SUB_64(m_hi, 0, m_lo, s); \
3053         } while (0)
3054
3055 #define SUB_EXTEND_USTAT(s, t) \
3056         do { \
3057                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3058                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3059         } while (0)
3060
3061 /*
3062  * General service functions
3063  */
3064
3065 static inline long bnx2x_hilo(u32 *hiref)
3066 {
3067         u32 lo = *(hiref + 1);
3068 #if (BITS_PER_LONG == 64)
3069         u32 hi = *hiref;
3070
3071         return HILO_U64(hi, lo);
3072 #else
3073         return lo;
3074 #endif
3075 }
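
/* Editor's note: an illustrative sketch, not part of the driver.
 * bnx2x_hilo() folds a {hi, lo} pair of u32 counters (hi word first
 * in memory) into a long.  On 64-bit kernels both words are
 * combined; on 32-bit kernels only the low word is returned, so
 * counters above 2^32 truncate.  The array below is hypothetical:
 */
#if 0
        u32 ctr[2] = { 0x00000001, 0x00000002 };        /* hi, lo */

        long val = bnx2x_hilo(ctr); /* 0x100000002 on 64-bit, 0x2 on 32-bit */
#endif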
3076
3077 /*
3078  * Init service functions
3079  */
3080
3081 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3082 {
3083         if (!bp->stats_pending) {
3084                 struct eth_query_ramrod_data ramrod_data = {0};
3085                 int i, rc;
3086
3087                 ramrod_data.drv_counter = bp->stats_counter++;
3088                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3089                 for_each_queue(bp, i)
3090                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3091
3092                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3093                                    ((u32 *)&ramrod_data)[1],
3094                                    ((u32 *)&ramrod_data)[0], 0);
3095                 if (rc == 0) {
3096                         /* stats ramrod has its own slot on the spq */
3097                         bp->spq_left++;
3098                         bp->stats_pending = 1;
3099                 }
3100         }
3101 }
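
/* Editor's note: a sketch of the convention used above (assumes a
 * struct bnx2x *bp in scope).  The query ramrod data is handed to
 * bnx2x_sp_post() as two u32 words, high word first, and the
 * spq_left++ on success gives back the generic SPQ slot accounted
 * by bnx2x_sp_post(), since the statistics ramrod has a reserved
 * slot of its own:
 */
#if 0
        struct eth_query_ramrod_data ramrod_data = {0};
        u32 *words = (u32 *)&ramrod_data;

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
                      words[1], words[0], 0);
#endif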
3102
3103 static void bnx2x_stats_init(struct bnx2x *bp)
3104 {
3105         int port = BP_PORT(bp);
3106         int i;
3107
3108         bp->stats_pending = 0;
3109         bp->executer_idx = 0;
3110         bp->stats_counter = 0;
3111
3112         /* port stats */
3113         if (!BP_NOMCP(bp))
3114                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3115         else
3116                 bp->port.port_stx = 0;
3117         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3118
3119         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3120         bp->port.old_nig_stats.brb_discard =
3121                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3122         bp->port.old_nig_stats.brb_truncate =
3123                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3124         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3125                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3126         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3127                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3128
3129         /* function stats */
3130         for_each_queue(bp, i) {
3131                 struct bnx2x_fastpath *fp = &bp->fp[i];
3132
3133                 memset(&fp->old_tclient, 0,
3134                        sizeof(struct tstorm_per_client_stats));
3135                 memset(&fp->old_uclient, 0,
3136                        sizeof(struct ustorm_per_client_stats));
3137                 memset(&fp->old_xclient, 0,
3138                        sizeof(struct xstorm_per_client_stats));
3139                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3140         }
3141
3142         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3143         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3144
3145         bp->stats_state = STATS_STATE_DISABLED;
3146         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3147                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3148 }
3149
3150 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3151 {
3152         struct dmae_command *dmae = &bp->stats_dmae;
3153         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3154
3155         *stats_comp = DMAE_COMP_VAL;
3156         if (CHIP_REV_IS_SLOW(bp))
3157                 return;
3158
3159         /* loader */
3160         if (bp->executer_idx) {
3161                 int loader_idx = PMF_DMAE_C(bp);
3162
3163                 memset(dmae, 0, sizeof(struct dmae_command));
3164
3165                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3166                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3167                                 DMAE_CMD_DST_RESET |
3168 #ifdef __BIG_ENDIAN
3169                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3170 #else
3171                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3172 #endif
3173                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3174                                                DMAE_CMD_PORT_0) |
3175                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3176                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3177                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3178                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3179                                      sizeof(struct dmae_command) *
3180                                      (loader_idx + 1)) >> 2;
3181                 dmae->dst_addr_hi = 0;
3182                 dmae->len = sizeof(struct dmae_command) >> 2;
3183                 if (CHIP_IS_E1(bp))
3184                         dmae->len--;
3185                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3186                 dmae->comp_addr_hi = 0;
3187                 dmae->comp_val = 1;
3188
3189                 *stats_comp = 0;
3190                 bnx2x_post_dmae(bp, dmae, loader_idx);
3191
3192         } else if (bp->func_stx) {
3193                 *stats_comp = 0;
3194                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3195         }
3196 }
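
/* Editor's note on the "loader" used above: when commands have been
 * staged (bp->executer_idx != 0), a single loader DMAE is posted
 * instead of the whole chain.  It copies one staged command from
 * host memory into DMAE command memory at slot loader_idx + 1, and
 * its completion write to dmae_reg_go_c[loader_idx + 1] executes
 * the copied command; the staged statistics commands in turn
 * complete back into dmae_reg_go_c[loader_idx], re-triggering the
 * loader.  The loader's opcode notably omits DMAE_CMD_SRC_RESET, so
 * its source pointer appears to advance through the staged array on
 * each pass. */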
3197
3198 static int bnx2x_stats_comp(struct bnx2x *bp)
3199 {
3200         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3201         int cnt = 10;
3202
3203         might_sleep();
3204         while (*stats_comp != DMAE_COMP_VAL) {
3205                 if (!cnt) {
3206                         BNX2X_ERR("timed out waiting for stats to finish\n");
3207                         break;
3208                 }
3209                 cnt--;
3210                 msleep(1);
3211         }
3212         return 1;
3213 }
3214
3215 /*
3216  * Statistics service functions
3217  */
3218
3219 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3220 {
3221         struct dmae_command *dmae;
3222         u32 opcode;
3223         int loader_idx = PMF_DMAE_C(bp);
3224         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3225
3226         /* sanity */
3227         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3228                 BNX2X_ERR("BUG!\n");
3229                 return;
3230         }
3231
3232         bp->executer_idx = 0;
3233
3234         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3235                   DMAE_CMD_C_ENABLE |
3236                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3237 #ifdef __BIG_ENDIAN
3238                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3239 #else
3240                   DMAE_CMD_ENDIANITY_DW_SWAP |
3241 #endif
3242                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3243                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3244
3245         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3246         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3247         dmae->src_addr_lo = bp->port.port_stx >> 2;
3248         dmae->src_addr_hi = 0;
3249         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3250         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3251         dmae->len = DMAE_LEN32_RD_MAX;
3252         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3253         dmae->comp_addr_hi = 0;
3254         dmae->comp_val = 1;
3255
3256         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3258         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3259         dmae->src_addr_hi = 0;
3260         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3261                                    DMAE_LEN32_RD_MAX * 4);
3262         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3263                                    DMAE_LEN32_RD_MAX * 4);
3264         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3265         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3266         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3267         dmae->comp_val = DMAE_COMP_VAL;
3268
3269         *stats_comp = 0;
3270         bnx2x_hw_stats_post(bp);
3271         bnx2x_stats_comp(bp);
3272 }
3273
3274 static void bnx2x_port_stats_init(struct bnx2x *bp)
3275 {
3276         struct dmae_command *dmae;
3277         int port = BP_PORT(bp);
3278         int vn = BP_E1HVN(bp);
3279         u32 opcode;
3280         int loader_idx = PMF_DMAE_C(bp);
3281         u32 mac_addr;
3282         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3283
3284         /* sanity */
3285         if (!bp->link_vars.link_up || !bp->port.pmf) {
3286                 BNX2X_ERR("BUG!\n");
3287                 return;
3288         }
3289
3290         bp->executer_idx = 0;
3291
3292         /* MCP */
3293         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3294                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3295                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3296 #ifdef __BIG_ENDIAN
3297                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3298 #else
3299                   DMAE_CMD_ENDIANITY_DW_SWAP |
3300 #endif
3301                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3302                   (vn << DMAE_CMD_E1HVN_SHIFT));
3303
3304         if (bp->port.port_stx) {
3305
3306                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3307                 dmae->opcode = opcode;
3308                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3309                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3310                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3311                 dmae->dst_addr_hi = 0;
3312                 dmae->len = sizeof(struct host_port_stats) >> 2;
3313                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314                 dmae->comp_addr_hi = 0;
3315                 dmae->comp_val = 1;
3316         }
3317
3318         if (bp->func_stx) {
3319
3320                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321                 dmae->opcode = opcode;
3322                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3323                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3324                 dmae->dst_addr_lo = bp->func_stx >> 2;
3325                 dmae->dst_addr_hi = 0;
3326                 dmae->len = sizeof(struct host_func_stats) >> 2;
3327                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3328                 dmae->comp_addr_hi = 0;
3329                 dmae->comp_val = 1;
3330         }
3331
3332         /* MAC */
3333         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3334                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3336 #ifdef __BIG_ENDIAN
3337                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3338 #else
3339                   DMAE_CMD_ENDIANITY_DW_SWAP |
3340 #endif
3341                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342                   (vn << DMAE_CMD_E1HVN_SHIFT));
3343
3344         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3345
3346                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3347                                    NIG_REG_INGRESS_BMAC0_MEM);
3348
3349                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3350                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3351                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3352                 dmae->opcode = opcode;
3353                 dmae->src_addr_lo = (mac_addr +
3354                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3355                 dmae->src_addr_hi = 0;
3356                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3357                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3358                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3359                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3360                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3361                 dmae->comp_addr_hi = 0;
3362                 dmae->comp_val = 1;
3363
3364                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3365                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3366                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367                 dmae->opcode = opcode;
3368                 dmae->src_addr_lo = (mac_addr +
3369                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3370                 dmae->src_addr_hi = 0;
3371                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3372                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3373                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3374                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3375                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3376                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3377                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378                 dmae->comp_addr_hi = 0;
3379                 dmae->comp_val = 1;
3380
3381         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3382
3383                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3384
3385                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3386                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3387                 dmae->opcode = opcode;
3388                 dmae->src_addr_lo = (mac_addr +
3389                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3390                 dmae->src_addr_hi = 0;
3391                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3392                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3393                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3394                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395                 dmae->comp_addr_hi = 0;
3396                 dmae->comp_val = 1;
3397
3398                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3399                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400                 dmae->opcode = opcode;
3401                 dmae->src_addr_lo = (mac_addr +
3402                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3403                 dmae->src_addr_hi = 0;
3404                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3405                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3406                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3407                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3408                 dmae->len = 1;
3409                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3410                 dmae->comp_addr_hi = 0;
3411                 dmae->comp_val = 1;
3412
3413                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3414                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3415                 dmae->opcode = opcode;
3416                 dmae->src_addr_lo = (mac_addr +
3417                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3418                 dmae->src_addr_hi = 0;
3419                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3420                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3421                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3422                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3423                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3424                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3425                 dmae->comp_addr_hi = 0;
3426                 dmae->comp_val = 1;
3427         }
3428
3429         /* NIG */
3430         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3431         dmae->opcode = opcode;
3432         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3433                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3434         dmae->src_addr_hi = 0;
3435         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3436         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3437         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3438         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3439         dmae->comp_addr_hi = 0;
3440         dmae->comp_val = 1;
3441
3442         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3443         dmae->opcode = opcode;
3444         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3445                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3446         dmae->src_addr_hi = 0;
3447         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3448                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3449         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3450                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3451         dmae->len = (2*sizeof(u32)) >> 2;
3452         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3453         dmae->comp_addr_hi = 0;
3454         dmae->comp_val = 1;
3455
3456         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3457         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3458                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3459                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3460 #ifdef __BIG_ENDIAN
3461                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3462 #else
3463                         DMAE_CMD_ENDIANITY_DW_SWAP |
3464 #endif
3465                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3466                         (vn << DMAE_CMD_E1HVN_SHIFT));
3467         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3468                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3469         dmae->src_addr_hi = 0;
3470         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3471                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3472         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3473                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3474         dmae->len = (2*sizeof(u32)) >> 2;
3475         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3476         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3477         dmae->comp_val = DMAE_COMP_VAL;
3478
3479         *stats_comp = 0;
3480 }
3481
3482 static void bnx2x_func_stats_init(struct bnx2x *bp)
3483 {
3484         struct dmae_command *dmae = &bp->stats_dmae;
3485         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3486
3487         /* sanity */
3488         if (!bp->func_stx) {
3489                 BNX2X_ERR("BUG!\n");
3490                 return;
3491         }
3492
3493         bp->executer_idx = 0;
3494         memset(dmae, 0, sizeof(struct dmae_command));
3495
3496         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3497                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499 #ifdef __BIG_ENDIAN
3500                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501 #else
3502                         DMAE_CMD_ENDIANITY_DW_SWAP |
3503 #endif
3504                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3506         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3507         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3508         dmae->dst_addr_lo = bp->func_stx >> 2;
3509         dmae->dst_addr_hi = 0;
3510         dmae->len = sizeof(struct host_func_stats) >> 2;
3511         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3512         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3513         dmae->comp_val = DMAE_COMP_VAL;
3514
3515         *stats_comp = 0;
3516 }
3517
3518 static void bnx2x_stats_start(struct bnx2x *bp)
3519 {
3520         if (bp->port.pmf)
3521                 bnx2x_port_stats_init(bp);
3522
3523         else if (bp->func_stx)
3524                 bnx2x_func_stats_init(bp);
3525
3526         bnx2x_hw_stats_post(bp);
3527         bnx2x_storm_stats_post(bp);
3528 }
3529
3530 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3531 {
3532         bnx2x_stats_comp(bp);
3533         bnx2x_stats_pmf_update(bp);
3534         bnx2x_stats_start(bp);
3535 }
3536
3537 static void bnx2x_stats_restart(struct bnx2x *bp)
3538 {
3539         bnx2x_stats_comp(bp);
3540         bnx2x_stats_start(bp);
3541 }
3542
3543 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3544 {
3545         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3546         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548         struct {
3549                 u32 lo;
3550                 u32 hi;
3551         } diff;
3552
3553         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3554         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3555         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3556         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3557         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3558         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3559         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3560         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3561         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3562         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3563         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3564         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3565         UPDATE_STAT64(tx_stat_gt127,
3566                                 tx_stat_etherstatspkts65octetsto127octets);
3567         UPDATE_STAT64(tx_stat_gt255,
3568                                 tx_stat_etherstatspkts128octetsto255octets);
3569         UPDATE_STAT64(tx_stat_gt511,
3570                                 tx_stat_etherstatspkts256octetsto511octets);
3571         UPDATE_STAT64(tx_stat_gt1023,
3572                                 tx_stat_etherstatspkts512octetsto1023octets);
3573         UPDATE_STAT64(tx_stat_gt1518,
3574                                 tx_stat_etherstatspkts1024octetsto1522octets);
3575         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3576         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3577         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3578         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3579         UPDATE_STAT64(tx_stat_gterr,
3580                                 tx_stat_dot3statsinternalmactransmiterrors);
3581         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3582
3583         estats->pause_frames_received_hi =
3584                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3585         estats->pause_frames_received_lo =
3586                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3587
3588         estats->pause_frames_sent_hi =
3589                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3590         estats->pause_frames_sent_lo =
3591                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3592 }
3593
3594 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3595 {
3596         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3597         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3598         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3599
3600         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3601         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3602         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3603         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3604         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3605         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3606         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3607         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3608         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3609         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3610         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3611         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3612         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3613         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3614         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3615         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3616         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3617         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3618         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3619         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3620         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3621         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3622         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3623         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3624         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3626         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3627         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3628         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3629         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3630         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3631
3632         estats->pause_frames_received_hi =
3633                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3634         estats->pause_frames_received_lo =
3635                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3636         ADD_64(estats->pause_frames_received_hi,
3637                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3638                estats->pause_frames_received_lo,
3639                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3640
3641         estats->pause_frames_sent_hi =
3642                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3643         estats->pause_frames_sent_lo =
3644                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3645         ADD_64(estats->pause_frames_sent_hi,
3646                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3647                estats->pause_frames_sent_lo,
3648                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3649 }
3650
3651 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3652 {
3653         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3654         struct nig_stats *old = &(bp->port.old_nig_stats);
3655         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3656         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3657         struct {
3658                 u32 lo;
3659                 u32 hi;
3660         } diff;
3661         u32 nig_timer_max;
3662
3663         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3664                 bnx2x_bmac_stats_update(bp);
3665
3666         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3667                 bnx2x_emac_stats_update(bp);
3668
3669         else { /* unreached */
3670                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3671                 return -1;
3672         }
3673
3674         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3675                       new->brb_discard - old->brb_discard);
3676         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3677                       new->brb_truncate - old->brb_truncate);
3678
3679         UPDATE_STAT64_NIG(egress_mac_pkt0,
3680                                         etherstatspkts1024octetsto1522octets);
3681         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3682
3683         memcpy(old, new, sizeof(struct nig_stats));
3684
3685         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3686                sizeof(struct mac_stx));
3687         estats->brb_drop_hi = pstats->brb_drop_hi;
3688         estats->brb_drop_lo = pstats->brb_drop_lo;
3689
3690         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3691
3692         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3693         if (nig_timer_max != estats->nig_timer_max) {
3694                 estats->nig_timer_max = nig_timer_max;
3695                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3696         }
3697
3698         return 0;
3699 }
3700
3701 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3702 {
3703         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3704         struct tstorm_per_port_stats *tport =
3705                                         &stats->tstorm_common.port_statistics;
3706         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3707         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3708         int i;
3709
3710         memset(&(fstats->total_bytes_received_hi), 0,
3711                sizeof(struct host_func_stats) - 2*sizeof(u32));
3712         estats->error_bytes_received_hi = 0;
3713         estats->error_bytes_received_lo = 0;
3714         estats->etherstatsoverrsizepkts_hi = 0;
3715         estats->etherstatsoverrsizepkts_lo = 0;
3716         estats->no_buff_discard_hi = 0;
3717         estats->no_buff_discard_lo = 0;
3718
3719         for_each_queue(bp, i) {
3720                 struct bnx2x_fastpath *fp = &bp->fp[i];
3721                 int cl_id = fp->cl_id;
3722                 struct tstorm_per_client_stats *tclient =
3723                                 &stats->tstorm_common.client_statistics[cl_id];
3724                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3725                 struct ustorm_per_client_stats *uclient =
3726                                 &stats->ustorm_common.client_statistics[cl_id];
3727                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3728                 struct xstorm_per_client_stats *xclient =
3729                                 &stats->xstorm_common.client_statistics[cl_id];
3730                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3731                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3732                 u32 diff;
3733
3734                 /* are storm stats valid? */
3735                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3736                                                         bp->stats_counter) {
3737                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3738                            "  xstorm counter (%d) != stats_counter (%d)\n",
3739                            i, xclient->stats_counter, bp->stats_counter);
3740                         return -1;
3741                 }
3742                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3743                                                         bp->stats_counter) {
3744                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3745                            "  tstorm counter (%d) != stats_counter (%d)\n",
3746                            i, tclient->stats_counter, bp->stats_counter);
3747                         return -2;
3748                 }
3749                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3750                                                         bp->stats_counter) {
3751                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3752                            "  ustorm counter (%d) != stats_counter (%d)\n",
3753                            i, uclient->stats_counter, bp->stats_counter);
3754                         return -4;
3755                 }
3756
3757                 qstats->total_bytes_received_hi =
3758                 qstats->valid_bytes_received_hi =
3759                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3760                 qstats->total_bytes_received_lo =
3761                 qstats->valid_bytes_received_lo =
3762                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3763
3764                 qstats->error_bytes_received_hi =
3765                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3766                 qstats->error_bytes_received_lo =
3767                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3768
3769                 ADD_64(qstats->total_bytes_received_hi,
3770                        qstats->error_bytes_received_hi,
3771                        qstats->total_bytes_received_lo,
3772                        qstats->error_bytes_received_lo);
3773
3774                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3775                                         total_unicast_packets_received);
3776                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3777                                         total_multicast_packets_received);
3778                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3779                                         total_broadcast_packets_received);
3780                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3781                                         etherstatsoverrsizepkts);
3782                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3783
3784                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3785                                         total_unicast_packets_received);
3786                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3787                                         total_multicast_packets_received);
3788                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3789                                         total_broadcast_packets_received);
3790                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3791                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3792                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3793
3794                 qstats->total_bytes_transmitted_hi =
3795                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3796                 qstats->total_bytes_transmitted_lo =
3797                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3798
3799                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3800                                         total_unicast_packets_transmitted);
3801                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3802                                         total_multicast_packets_transmitted);
3803                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3804                                         total_broadcast_packets_transmitted);
3805
3806                 old_tclient->checksum_discard = tclient->checksum_discard;
3807                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3808
3809                 ADD_64(fstats->total_bytes_received_hi,
3810                        qstats->total_bytes_received_hi,
3811                        fstats->total_bytes_received_lo,
3812                        qstats->total_bytes_received_lo);
3813                 ADD_64(fstats->total_bytes_transmitted_hi,
3814                        qstats->total_bytes_transmitted_hi,
3815                        fstats->total_bytes_transmitted_lo,
3816                        qstats->total_bytes_transmitted_lo);
3817                 ADD_64(fstats->total_unicast_packets_received_hi,
3818                        qstats->total_unicast_packets_received_hi,
3819                        fstats->total_unicast_packets_received_lo,
3820                        qstats->total_unicast_packets_received_lo);
3821                 ADD_64(fstats->total_multicast_packets_received_hi,
3822                        qstats->total_multicast_packets_received_hi,
3823                        fstats->total_multicast_packets_received_lo,
3824                        qstats->total_multicast_packets_received_lo);
3825                 ADD_64(fstats->total_broadcast_packets_received_hi,
3826                        qstats->total_broadcast_packets_received_hi,
3827                        fstats->total_broadcast_packets_received_lo,
3828                        qstats->total_broadcast_packets_received_lo);
3829                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3830                        qstats->total_unicast_packets_transmitted_hi,
3831                        fstats->total_unicast_packets_transmitted_lo,
3832                        qstats->total_unicast_packets_transmitted_lo);
3833                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3834                        qstats->total_multicast_packets_transmitted_hi,
3835                        fstats->total_multicast_packets_transmitted_lo,
3836                        qstats->total_multicast_packets_transmitted_lo);
3837                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3838                        qstats->total_broadcast_packets_transmitted_hi,
3839                        fstats->total_broadcast_packets_transmitted_lo,
3840                        qstats->total_broadcast_packets_transmitted_lo);
3841                 ADD_64(fstats->valid_bytes_received_hi,
3842                        qstats->valid_bytes_received_hi,
3843                        fstats->valid_bytes_received_lo,
3844                        qstats->valid_bytes_received_lo);
3845
3846                 ADD_64(estats->error_bytes_received_hi,
3847                        qstats->error_bytes_received_hi,
3848                        estats->error_bytes_received_lo,
3849                        qstats->error_bytes_received_lo);
3850                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3851                        qstats->etherstatsoverrsizepkts_hi,
3852                        estats->etherstatsoverrsizepkts_lo,
3853                        qstats->etherstatsoverrsizepkts_lo);
3854                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3855                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3856         }
3857
3858         ADD_64(fstats->total_bytes_received_hi,
3859                estats->rx_stat_ifhcinbadoctets_hi,
3860                fstats->total_bytes_received_lo,
3861                estats->rx_stat_ifhcinbadoctets_lo);
3862
3863         memcpy(estats, &(fstats->total_bytes_received_hi),
3864                sizeof(struct host_func_stats) - 2*sizeof(u32));
3865
3866         ADD_64(estats->etherstatsoverrsizepkts_hi,
3867                estats->rx_stat_dot3statsframestoolong_hi,
3868                estats->etherstatsoverrsizepkts_lo,
3869                estats->rx_stat_dot3statsframestoolong_lo);
3870         ADD_64(estats->error_bytes_received_hi,
3871                estats->rx_stat_ifhcinbadoctets_hi,
3872                estats->error_bytes_received_lo,
3873                estats->rx_stat_ifhcinbadoctets_lo);
3874
3875         if (bp->port.pmf) {
3876                 estats->mac_filter_discard =
3877                                 le32_to_cpu(tport->mac_filter_discard);
3878                 estats->xxoverflow_discard =
3879                                 le32_to_cpu(tport->xxoverflow_discard);
3880                 estats->brb_truncate_discard =
3881                                 le32_to_cpu(tport->brb_truncate_discard);
3882                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3883         }
3884
3885         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3886
3887         bp->stats_pending = 0;
3888
3889         return 0;
3890 }
3891
3892 static void bnx2x_net_stats_update(struct bnx2x *bp)
3893 {
3894         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3895         struct net_device_stats *nstats = &bp->dev->stats;
3896         int i;
3897
3898         nstats->rx_packets =
3899                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3900                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3901                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3902
3903         nstats->tx_packets =
3904                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3905                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3906                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3907
3908         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3909
3910         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3911
3912         nstats->rx_dropped = estats->mac_discard;
3913         for_each_queue(bp, i)
3914                 nstats->rx_dropped +=
3915                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3916
3917         nstats->tx_dropped = 0;
3918
3919         nstats->multicast =
3920                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3921
3922         nstats->collisions =
3923                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3924
3925         nstats->rx_length_errors =
3926                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3927                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3928         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3929                                  bnx2x_hilo(&estats->brb_truncate_hi);
3930         nstats->rx_crc_errors =
3931                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3932         nstats->rx_frame_errors =
3933                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3934         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3935         nstats->rx_missed_errors = estats->xxoverflow_discard;
3936
3937         nstats->rx_errors = nstats->rx_length_errors +
3938                             nstats->rx_over_errors +
3939                             nstats->rx_crc_errors +
3940                             nstats->rx_frame_errors +
3941                             nstats->rx_fifo_errors +
3942                             nstats->rx_missed_errors;
3943
3944         nstats->tx_aborted_errors =
3945                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3946                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3947         nstats->tx_carrier_errors =
3948                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3949         nstats->tx_fifo_errors = 0;
3950         nstats->tx_heartbeat_errors = 0;
3951         nstats->tx_window_errors = 0;
3952
3953         nstats->tx_errors = nstats->tx_aborted_errors +
3954                             nstats->tx_carrier_errors +
3955             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3956 }
3957
3958 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3959 {
3960         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3961         int i;
3962
3963         estats->driver_xoff = 0;
3964         estats->rx_err_discard_pkt = 0;
3965         estats->rx_skb_alloc_failed = 0;
3966         estats->hw_csum_err = 0;
3967         for_each_queue(bp, i) {
3968                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3969
3970                 estats->driver_xoff += qstats->driver_xoff;
3971                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3972                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3973                 estats->hw_csum_err += qstats->hw_csum_err;
3974         }
3975 }
3976
3977 static void bnx2x_stats_update(struct bnx2x *bp)
3978 {
3979         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3980
3981         if (*stats_comp != DMAE_COMP_VAL)
3982                 return;
3983
3984         if (bp->port.pmf)
3985                 bnx2x_hw_stats_update(bp);
3986
3987         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3988                 BNX2X_ERR("storm stats not updated 3 times in a row\n");
3989                 bnx2x_panic();
3990                 return;
3991         }
3992
3993         bnx2x_net_stats_update(bp);
3994         bnx2x_drv_stats_update(bp);
3995
3996         if (bp->msglevel & NETIF_MSG_TIMER) {
3997                 struct tstorm_per_client_stats *old_tclient =
3998                                                         &bp->fp->old_tclient;
3999                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4000                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4001                 struct net_device_stats *nstats = &bp->dev->stats;
4002                 int i;
4003
4004                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4005                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4006                                   "  tx pkt (%lx)\n",
4007                        bnx2x_tx_avail(bp->fp),
4008                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4009                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4010                                   "  rx pkt (%lx)\n",
4011                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4012                              bp->fp->rx_comp_cons),
4013                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4014                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4015                                   "brb truncate %u\n",
4016                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4017                        qstats->driver_xoff,
4018                        estats->brb_drop_lo, estats->brb_truncate_lo);
4019                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4020                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4021                         "mac_discard %u  mac_filter_discard %u  "
4022                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4023                         "ttl0_discard %u\n",
4024                        le32_to_cpu(old_tclient->checksum_discard),
4025                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4026                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4027                        estats->mac_discard, estats->mac_filter_discard,
4028                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4029                        le32_to_cpu(old_tclient->ttl0_discard));
4030
4031                 for_each_queue(bp, i) {
4032                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4033                                bnx2x_fp(bp, i, tx_pkt),
4034                                bnx2x_fp(bp, i, rx_pkt),
4035                                bnx2x_fp(bp, i, rx_calls));
4036                 }
4037         }
4038
4039         bnx2x_hw_stats_post(bp);
4040         bnx2x_storm_stats_post(bp);
4041 }
4042
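/* Queue the final DMAE transfers that copy the host port/function
 * statistics out to their shared-memory areas (port_stx/func_stx)
 * before collection stops; completion is signalled either by chaining
 * through the loader channel or by DMAE_COMP_VAL landing in stats_comp.
 */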
4043 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4044 {
4045         struct dmae_command *dmae;
4046         u32 opcode;
4047         int loader_idx = PMF_DMAE_C(bp);
4048         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4049
4050         bp->executer_idx = 0;
4051
4052         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4053                   DMAE_CMD_C_ENABLE |
4054                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4055 #ifdef __BIG_ENDIAN
4056                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4057 #else
4058                   DMAE_CMD_ENDIANITY_DW_SWAP |
4059 #endif
4060                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4061                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4062
4063         if (bp->port.port_stx) {
4064
4065                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066                 if (bp->func_stx)
4067                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4068                 else
4069                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4070                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4071                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4072                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4073                 dmae->dst_addr_hi = 0;
4074                 dmae->len = sizeof(struct host_port_stats) >> 2;
4075                 if (bp->func_stx) {
4076                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4077                         dmae->comp_addr_hi = 0;
4078                         dmae->comp_val = 1;
4079                 } else {
4080                         dmae->comp_addr_lo =
4081                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4082                         dmae->comp_addr_hi =
4083                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4084                         dmae->comp_val = DMAE_COMP_VAL;
4085
4086                         *stats_comp = 0;
4087                 }
4088         }
4089
4090         if (bp->func_stx) {
4091
4092                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4093                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4094                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4095                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4096                 dmae->dst_addr_lo = bp->func_stx >> 2;
4097                 dmae->dst_addr_hi = 0;
4098                 dmae->len = sizeof(struct host_func_stats) >> 2;
4099                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4100                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4101                 dmae->comp_val = DMAE_COMP_VAL;
4102
4103                 *stats_comp = 0;
4104         }
4105 }
4106
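/* Final statistics pass on stop: wait for the outstanding DMAE to
 * complete, fold in the last valid HW and storm snapshots, and (on
 * the PMF only) post the closing port-stats transfer.
 */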
4107 static void bnx2x_stats_stop(struct bnx2x *bp)
4108 {
4109         int update = 0;
4110
4111         bnx2x_stats_comp(bp);
4112
4113         if (bp->port.pmf)
4114                 update = (bnx2x_hw_stats_update(bp) == 0);
4115
4116         update |= (bnx2x_storm_stats_update(bp) == 0);
4117
4118         if (update) {
4119                 bnx2x_net_stats_update(bp);
4120
4121                 if (bp->port.pmf)
4122                         bnx2x_port_stats_stop(bp);
4123
4124                 bnx2x_hw_stats_post(bp);
4125                 bnx2x_stats_comp(bp);
4126         }
4127 }
4128
4129 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4130 {
4131 }
4132
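/* Statistics state machine, indexed by [current state][event]: each
 * entry names the handler to run and the state to move to.  UPDATE
 * events come from the periodic timer below; PMF, LINK_UP and STOP
 * come from the slow path (MCP, link and unload handling).
 */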
4133 static const struct {
4134         void (*action)(struct bnx2x *bp);
4135         enum bnx2x_stats_state next_state;
4136 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4137 /* state        event   */
4138 {
4139 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4140 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4141 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4142 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4143 },
4144 {
4145 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4146 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4147 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4148 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4149 }
4150 };
4151
4152 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4153 {
4154         enum bnx2x_stats_state state = bp->stats_state;
4155
4156         bnx2x_stats_stm[state][event].action(bp);
4157         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4158
4159         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4160                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4161                    state, event, bp->stats_state);
4162 }
4163
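/* Periodic per-device timer: services the rings directly when the
 * 'poll' debug option is set, exchanges heartbeat pulse sequence
 * numbers with the MCP through shared memory, and generates a stats
 * UPDATE event while the device is up.  Re-arms itself every
 * bp->current_interval jiffies.
 */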
4164 static void bnx2x_timer(unsigned long data)
4165 {
4166         struct bnx2x *bp = (struct bnx2x *) data;
4167
4168         if (!netif_running(bp->dev))
4169                 return;
4170
4171         if (atomic_read(&bp->intr_sem) != 0)
4172                 goto timer_restart;
4173
4174         if (poll) {
4175                 struct bnx2x_fastpath *fp = &bp->fp[0];
4176                 int rc;
4177
4178                 bnx2x_tx_int(fp);
4179                 rc = bnx2x_rx_int(fp, 1000);
4180         }
4181
4182         if (!BP_NOMCP(bp)) {
4183                 int func = BP_FUNC(bp);
4184                 u32 drv_pulse;
4185                 u32 mcp_pulse;
4186
4187                 ++bp->fw_drv_pulse_wr_seq;
4188                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4189                 /* TBD - add SYSTEM_TIME */
4190                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4191                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4192
4193                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4194                              MCP_PULSE_SEQ_MASK);
4195                 /* The delta between driver pulse and mcp response
4196                  * should be 1 (before mcp response) or 0 (after mcp response)
4197                  */
4198                 if ((drv_pulse != mcp_pulse) &&
4199                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4200                         /* someone lost a heartbeat... */
4201                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4202                                   drv_pulse, mcp_pulse);
4203                 }
4204         }
4205
4206         if ((bp->state == BNX2X_STATE_OPEN) ||
4207             (bp->state == BNX2X_STATE_DISABLED))
4208                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4209
4210 timer_restart:
4211         mod_timer(&bp->timer, jiffies + bp->current_interval);
4212 }
4213
4214 /* end of Statistics */
4215
4216 /* nic init */
4217
4218 /*
4219  * nic init service functions
4220  */
4221
4222 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4223 {
4224         int port = BP_PORT(bp);
4225
4226         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4227                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4228                         sizeof(struct ustorm_status_block)/4);
4229         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4230                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4231                         sizeof(struct cstorm_status_block)/4);
4232 }
4233
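/* Hook a fastpath status block up to the chip: write its DMA address
 * and owning function into the USTORM and CSTORM internal memories,
 * start with every host-coalescing index disabled, and ack the IGU
 * to enable interrupts for this status block.
 */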
4234 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4235                           dma_addr_t mapping, int sb_id)
4236 {
4237         int port = BP_PORT(bp);
4238         int func = BP_FUNC(bp);
4239         int index;
4240         u64 section;
4241
4242         /* USTORM */
4243         section = ((u64)mapping) + offsetof(struct host_status_block,
4244                                             u_status_block);
4245         sb->u_status_block.status_block_id = sb_id;
4246
4247         REG_WR(bp, BAR_USTRORM_INTMEM +
4248                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4249         REG_WR(bp, BAR_USTRORM_INTMEM +
4250                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4251                U64_HI(section));
4252         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4253                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4254
4255         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4256                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4257                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4258
4259         /* CSTORM */
4260         section = ((u64)mapping) + offsetof(struct host_status_block,
4261                                             c_status_block);
4262         sb->c_status_block.status_block_id = sb_id;
4263
4264         REG_WR(bp, BAR_CSTRORM_INTMEM +
4265                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4266         REG_WR(bp, BAR_CSTRORM_INTMEM +
4267                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4268                U64_HI(section));
4269         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4270                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4271
4272         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4273                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4274                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4275
4276         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4277 }
4278
4279 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4280 {
4281         int func = BP_FUNC(bp);
4282
4283         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4284                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4285                         sizeof(struct tstorm_def_status_block)/4);
4286         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4287                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4288                         sizeof(struct ustorm_def_status_block)/4);
4289         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4290                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4291                         sizeof(struct cstorm_def_status_block)/4);
4292         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4293                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4294                         sizeof(struct xstorm_def_status_block)/4);
4295 }
4296
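/* The default (slow-path) status block gets the same treatment, plus
 * the attention wiring: snapshot the AEU attention-group masks, point
 * the HC attention message registers at the attention section, and
 * register the per-storm default-SB sections for this function.
 */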
4297 static void bnx2x_init_def_sb(struct bnx2x *bp,
4298                               struct host_def_status_block *def_sb,
4299                               dma_addr_t mapping, int sb_id)
4300 {
4301         int port = BP_PORT(bp);
4302         int func = BP_FUNC(bp);
4303         int index, val, reg_offset;
4304         u64 section;
4305
4306         /* ATTN */
4307         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4308                                             atten_status_block);
4309         def_sb->atten_status_block.status_block_id = sb_id;
4310
4311         bp->attn_state = 0;
4312
4313         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4314                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4315
4316         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4317                 bp->attn_group[index].sig[0] = REG_RD(bp,
4318                                                      reg_offset + 0x10*index);
4319                 bp->attn_group[index].sig[1] = REG_RD(bp,
4320                                                reg_offset + 0x4 + 0x10*index);
4321                 bp->attn_group[index].sig[2] = REG_RD(bp,
4322                                                reg_offset + 0x8 + 0x10*index);
4323                 bp->attn_group[index].sig[3] = REG_RD(bp,
4324                                                reg_offset + 0xc + 0x10*index);
4325         }
4326
4327         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4328                              HC_REG_ATTN_MSG0_ADDR_L);
4329
4330         REG_WR(bp, reg_offset, U64_LO(section));
4331         REG_WR(bp, reg_offset + 4, U64_HI(section));
4332
4333         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4334
4335         val = REG_RD(bp, reg_offset);
4336         val |= sb_id;
4337         REG_WR(bp, reg_offset, val);
4338
4339         /* USTORM */
4340         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4341                                             u_def_status_block);
4342         def_sb->u_def_status_block.status_block_id = sb_id;
4343
4344         REG_WR(bp, BAR_USTRORM_INTMEM +
4345                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4346         REG_WR(bp, BAR_USTRORM_INTMEM +
4347                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4348                U64_HI(section));
4349         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4350                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4351
4352         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4353                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4354                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4355
4356         /* CSTORM */
4357         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4358                                             c_def_status_block);
4359         def_sb->c_def_status_block.status_block_id = sb_id;
4360
4361         REG_WR(bp, BAR_CSTRORM_INTMEM +
4362                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4363         REG_WR(bp, BAR_CSTRORM_INTMEM +
4364                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4365                U64_HI(section));
4366         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4367                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4368
4369         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4370                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4371                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4372
4373         /* TSTORM */
4374         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4375                                             t_def_status_block);
4376         def_sb->t_def_status_block.status_block_id = sb_id;
4377
4378         REG_WR(bp, BAR_TSTRORM_INTMEM +
4379                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4380         REG_WR(bp, BAR_TSTRORM_INTMEM +
4381                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4382                U64_HI(section));
4383         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4384                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4385
4386         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4387                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4388                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4389
4390         /* XSTORM */
4391         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4392                                             x_def_status_block);
4393         def_sb->x_def_status_block.status_block_id = sb_id;
4394
4395         REG_WR(bp, BAR_XSTRORM_INTMEM +
4396                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4397         REG_WR(bp, BAR_XSTRORM_INTMEM +
4398                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4399                U64_HI(section));
4400         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4401                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4402
4403         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4404                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4405                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4406
4407         bp->stats_pending = 0;
4408         bp->set_mac_pending = 0;
4409
4410         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4411 }
4412
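/* Program the host-coalescing timeouts of every queue's Rx and Tx CQ
 * index.  rx_ticks/tx_ticks are kept in usec; the /12 suggests the HC
 * timeout field counts in 12-usec hardware units.  A tick value of 0
 * disables the index altogether (HC_DISABLE written as 1).
 */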
4413 static void bnx2x_update_coalesce(struct bnx2x *bp)
4414 {
4415         int port = BP_PORT(bp);
4416         int i;
4417
4418         for_each_queue(bp, i) {
4419                 int sb_id = bp->fp[i].sb_id;
4420
4421                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4422                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4423                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4424                                                     U_SB_ETH_RX_CQ_INDEX),
4425                         bp->rx_ticks/12);
4426                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4427                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4428                                                      U_SB_ETH_RX_CQ_INDEX),
4429                          bp->rx_ticks ? 0 : 1);
4430
4431                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4432                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4433                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4434                                                     C_SB_ETH_TX_CQ_INDEX),
4435                         bp->tx_ticks/12);
4436                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4437                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4438                                                      C_SB_ETH_TX_CQ_INDEX),
4439                          bp->tx_ticks ? 0 : 1);
4440         }
4441 }
4442
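/* Release the first 'last' TPA bins of a queue: the buffer is only
 * DMA-unmapped if an aggregation was actually in progress on that
 * bin (BNX2X_TPA_START); the skb is freed either way.
 */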
4443 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4444                                        struct bnx2x_fastpath *fp, int last)
4445 {
4446         int i;
4447
4448         for (i = 0; i < last; i++) {
4449                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4450                 struct sk_buff *skb = rx_buf->skb;
4451
4452                 if (skb == NULL) {
4453                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4454                         continue;
4455                 }
4456
4457                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4458                         pci_unmap_single(bp->pdev,
4459                                          pci_unmap_addr(rx_buf, mapping),
4460                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4461
4462                 dev_kfree_skb(skb);
4463                 rx_buf->skb = NULL;
4464         }
4465 }
4466
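/* Rx-side ring setup for all queues: (re)fill each queue's TPA pool,
 * chain the "next page" elements so the last descriptor of every
 * SGE/BD/CQ page points at the following page, pre-allocate SGEs and
 * rx skbs, and finally publish the initial producer values to the
 * chip (which may already raise an interrupt - see the warning below).
 */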
4467 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4468 {
4469         int func = BP_FUNC(bp);
4470         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4471                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4472         u16 ring_prod, cqe_ring_prod;
4473         int i, j;
4474
4475         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4476         DP(NETIF_MSG_IFUP,
4477            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4478
4479         if (bp->flags & TPA_ENABLE_FLAG) {
4480
4481                 for_each_rx_queue(bp, j) {
4482                         struct bnx2x_fastpath *fp = &bp->fp[j];
4483
4484                         for (i = 0; i < max_agg_queues; i++) {
4485                                 fp->tpa_pool[i].skb =
4486                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4487                                 if (!fp->tpa_pool[i].skb) {
4488                                         BNX2X_ERR("Failed to allocate TPA "
4489                                                   "skb pool for queue[%d] - "
4490                                                   "disabling TPA on this "
4491                                                   "queue!\n", j);
4492                                         bnx2x_free_tpa_pool(bp, fp, i);
4493                                         fp->disable_tpa = 1;
4494                                         break;
4495                                 }
4496                                 /* map against this queue's pool, not fp[0]'s */
4497                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4498                                                    mapping, 0);
4499                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4500                         }
4501                 }
4502         }
4503
4504         for_each_rx_queue(bp, j) {
4505                 struct bnx2x_fastpath *fp = &bp->fp[j];
4506
4507                 fp->rx_bd_cons = 0;
4508                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4509                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4510
4511                 /* "next page" elements initialization */
4512                 /* SGE ring */
4513                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4514                         struct eth_rx_sge *sge;
4515
4516                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4517                         sge->addr_hi =
4518                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4519                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4520                         sge->addr_lo =
4521                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4522                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4523                 }
4524
4525                 bnx2x_init_sge_ring_bit_mask(fp);
4526
4527                 /* RX BD ring */
4528                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4529                         struct eth_rx_bd *rx_bd;
4530
4531                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4532                         rx_bd->addr_hi =
4533                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4534                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4535                         rx_bd->addr_lo =
4536                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4537                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4538                 }
4539
4540                 /* CQ ring */
4541                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4542                         struct eth_rx_cqe_next_page *nextpg;
4543
4544                         nextpg = (struct eth_rx_cqe_next_page *)
4545                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4546                         nextpg->addr_hi =
4547                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4548                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4549                         nextpg->addr_lo =
4550                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4551                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4552                 }
4553
4554                 /* Allocate SGEs and initialize the ring elements */
4555                 for (i = 0, ring_prod = 0;
4556                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4557
4558                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4559                                 BNX2X_ERR("was only able to allocate "
4560                                           "%d rx sges\n", i);
4561                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4562                                 /* Cleanup already allocated elements */
4563                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4564                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4565                                 fp->disable_tpa = 1;
4566                                 ring_prod = 0;
4567                                 break;
4568                         }
4569                         ring_prod = NEXT_SGE_IDX(ring_prod);
4570                 }
4571                 fp->rx_sge_prod = ring_prod;
4572
4573                 /* Allocate BDs and initialize BD ring */
4574                 fp->rx_comp_cons = 0;
4575                 cqe_ring_prod = ring_prod = 0;
4576                 for (i = 0; i < bp->rx_ring_size; i++) {
4577                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4578                                 BNX2X_ERR("was only able to allocate "
4579                                           "%d rx skbs on queue[%d]\n", i, j);
4580                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4581                                 break;
4582                         }
4583                         ring_prod = NEXT_RX_IDX(ring_prod);
4584                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4585                         WARN_ON(ring_prod <= i);
4586                 }
4587
4588                 fp->rx_bd_prod = ring_prod;
4589                 /* must not have more available CQEs than BDs */
4590                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4591                                        cqe_ring_prod);
4592                 fp->rx_pkt = fp->rx_calls = 0;
4593
4594                 /* Warning!
4595                  * This will generate an interrupt (to the TSTORM);
4596                  * it must only be done after the chip is initialized.
4597                  */
4598                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4599                                      fp->rx_sge_prod);
4600                 if (j != 0)
4601                         continue;
4602
4603                 REG_WR(bp, BAR_USTRORM_INTMEM +
4604                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4605                        U64_LO(fp->rx_comp_mapping));
4606                 REG_WR(bp, BAR_USTRORM_INTMEM +
4607                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4608                        U64_HI(fp->rx_comp_mapping));
4609         }
4610 }
4611
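/* Tx-side ring setup: only the "next page" chaining and the
 * producer/consumer bookkeeping need initializing here; Tx BDs are
 * filled on demand at transmit time.
 */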
4612 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4613 {
4614         int i, j;
4615
4616         for_each_tx_queue(bp, j) {
4617                 struct bnx2x_fastpath *fp = &bp->fp[j];
4618
4619                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4620                         struct eth_tx_bd *tx_bd =
4621                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4622
4623                         tx_bd->addr_hi =
4624                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4625                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4626                         tx_bd->addr_lo =
4627                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4628                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4629                 }
4630
4631                 fp->tx_pkt_prod = 0;
4632                 fp->tx_pkt_cons = 0;
4633                 fp->tx_bd_prod = 0;
4634                 fp->tx_bd_cons = 0;
4635                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4636                 fp->tx_pkt = 0;
4637         }
4638 }
4639
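/* Slow-path queue (SPQ) init: reset the producer bookkeeping and tell
 * XSTORM where the ring and its producer index live, so slow-path
 * commands (ramrods) can be posted.
 */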
4640 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4641 {
4642         int func = BP_FUNC(bp);
4643
4644         spin_lock_init(&bp->spq_lock);
4645
4646         bp->spq_left = MAX_SPQ_PENDING;
4647         bp->spq_prod_idx = 0;
4648         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4649         bp->spq_prod_bd = bp->spq;
4650         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4651
4652         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4653                U64_LO(bp->spq_mapping));
4654         REG_WR(bp,
4655                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4656                U64_HI(bp->spq_mapping));
4657
4658         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4659                bp->spq_prod_idx);
4660 }
4661
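/* Fill the per-connection ethernet context: the USTORM section
 * describes the Rx BD/SGE rings and buffer sizes, the XSTORM section
 * the Tx BD ring and doorbell data, together with the CDU reservation
 * values and the status-block/statistics bindings of each side.
 */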
4662 static void bnx2x_init_context(struct bnx2x *bp)
4663 {
4664         int i;
4665
4666         for_each_queue(bp, i) {
4667                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4668                 struct bnx2x_fastpath *fp = &bp->fp[i];
4669                 u8 cl_id = fp->cl_id;
4670                 u8 sb_id = fp->sb_id;
4671
4672                 context->ustorm_st_context.common.sb_index_numbers =
4673                                                 BNX2X_RX_SB_INDEX_NUM;
4674                 context->ustorm_st_context.common.clientId = cl_id;
4675                 context->ustorm_st_context.common.status_block_id = sb_id;
4676                 context->ustorm_st_context.common.flags =
4677                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4678                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4679                 context->ustorm_st_context.common.statistics_counter_id =
4680                                                 cl_id;
4681                 context->ustorm_st_context.common.mc_alignment_log_size =
4682                                                 BNX2X_RX_ALIGN_SHIFT;
4683                 context->ustorm_st_context.common.bd_buff_size =
4684                                                 bp->rx_buf_size;
4685                 context->ustorm_st_context.common.bd_page_base_hi =
4686                                                 U64_HI(fp->rx_desc_mapping);
4687                 context->ustorm_st_context.common.bd_page_base_lo =
4688                                                 U64_LO(fp->rx_desc_mapping);
4689                 if (!fp->disable_tpa) {
4690                         context->ustorm_st_context.common.flags |=
4691                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4692                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4693                         context->ustorm_st_context.common.sge_buff_size =
4694                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4695                                          (u32)0xffff);
4696                         context->ustorm_st_context.common.sge_page_base_hi =
4697                                                 U64_HI(fp->rx_sge_mapping);
4698                         context->ustorm_st_context.common.sge_page_base_lo =
4699                                                 U64_LO(fp->rx_sge_mapping);
4700                 }
4701
4702                 context->ustorm_ag_context.cdu_usage =
4703                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4704                                                CDU_REGION_NUMBER_UCM_AG,
4705                                                ETH_CONNECTION_TYPE);
4706
4707                 context->xstorm_st_context.tx_bd_page_base_hi =
4708                                                 U64_HI(fp->tx_desc_mapping);
4709                 context->xstorm_st_context.tx_bd_page_base_lo =
4710                                                 U64_LO(fp->tx_desc_mapping);
4711                 context->xstorm_st_context.db_data_addr_hi =
4712                                                 U64_HI(fp->tx_prods_mapping);
4713                 context->xstorm_st_context.db_data_addr_lo =
4714                                                 U64_LO(fp->tx_prods_mapping);
4715                 context->xstorm_st_context.statistics_data = (cl_id |
4716                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4717                 context->cstorm_st_context.sb_index_number =
4718                                                 C_SB_ETH_TX_CQ_INDEX;
4719                 context->cstorm_st_context.status_block_id = sb_id;
4720
4721                 context->xstorm_ag_context.cdu_reserved =
4722                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4723                                                CDU_REGION_NUMBER_XCM_AG,
4724                                                ETH_CONNECTION_TYPE);
4725         }
4726 }
4727
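/* RSS indirection table: spread the TSTORM table entries round-robin
 * across this function's Rx-queue client ids (skipped entirely when
 * RSS is disabled).
 */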
4728 static void bnx2x_init_ind_table(struct bnx2x *bp)
4729 {
4730         int func = BP_FUNC(bp);
4731         int i;
4732
4733         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4734                 return;
4735
4736         DP(NETIF_MSG_IFUP,
4737            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4738         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4739                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4740                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4741                         bp->fp->cl_id + (i % bp->num_rx_queues));
4742 }
4743
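/* Per-client TSTORM configuration: MTU, statistics counter id,
 * optional VLAN stripping and - when TPA is on - the per-packet SGE
 * budget, rounded up to whole SGEs of PAGES_PER_SGE pages each.  The
 * struct is written into internal memory 32 bits at a time.
 */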
4744 static void bnx2x_set_client_config(struct bnx2x *bp)
4745 {
4746         struct tstorm_eth_client_config tstorm_client = {0};
4747         int port = BP_PORT(bp);
4748         int i;
4749
4750         tstorm_client.mtu = bp->dev->mtu;
4751         tstorm_client.config_flags =
4752                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4753                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4754 #ifdef BCM_VLAN
4755         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4756                 tstorm_client.config_flags |=
4757                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4758                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4759         }
4760 #endif
4761
4762         if (bp->flags & TPA_ENABLE_FLAG) {
4763                 tstorm_client.max_sges_for_packet =
4764                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4765                 tstorm_client.max_sges_for_packet =
4766                         ((tstorm_client.max_sges_for_packet +
4767                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4768                         PAGES_PER_SGE_SHIFT;
4769
4770                 tstorm_client.config_flags |=
4771                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4772         }
4773
4774         for_each_queue(bp, i) {
4775                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4776
4777                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4778                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4779                        ((u32 *)&tstorm_client)[0]);
4780                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4781                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4782                        ((u32 *)&tstorm_client)[1]);
4783         }
4784
4785         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4786            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4787 }
4788
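/* Translate the driver rx_mode into the TSTORM per-function MAC
 * filter masks (drop-all/accept-all for unicast, multicast and
 * broadcast), where the mask bit selects this function's client.
 * The client config is only (re)written once Rx is actually enabled.
 */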
4789 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4790 {
4791         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4792         int mode = bp->rx_mode;
4793         int mask = (1 << BP_L_ID(bp));
4794         int func = BP_FUNC(bp);
4795         int i;
4796
4797         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4798
4799         switch (mode) {
4800         case BNX2X_RX_MODE_NONE: /* no Rx */
4801                 tstorm_mac_filter.ucast_drop_all = mask;
4802                 tstorm_mac_filter.mcast_drop_all = mask;
4803                 tstorm_mac_filter.bcast_drop_all = mask;
4804                 break;
4805
4806         case BNX2X_RX_MODE_NORMAL:
4807                 tstorm_mac_filter.bcast_accept_all = mask;
4808                 break;
4809
4810         case BNX2X_RX_MODE_ALLMULTI:
4811                 tstorm_mac_filter.mcast_accept_all = mask;
4812                 tstorm_mac_filter.bcast_accept_all = mask;
4813                 break;
4814
4815         case BNX2X_RX_MODE_PROMISC:
4816                 tstorm_mac_filter.ucast_accept_all = mask;
4817                 tstorm_mac_filter.mcast_accept_all = mask;
4818                 tstorm_mac_filter.bcast_accept_all = mask;
4819                 break;
4820
4821         default:
4822                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4823                 break;
4824         }
4825
4826         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4827                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4828                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4829                        ((u32 *)&tstorm_mac_filter)[i]);
4830
4831 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4832                    ((u32 *)&tstorm_mac_filter)[i]); */
4833         }
4834
4835         if (mode != BNX2X_RX_MODE_NONE)
4836                 bnx2x_set_client_config(bp);
4837 }
4838
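/* Chip-wide internal init, run once by the first driver loaded:
 * advertise TPA existence to TSTORM and zero the USTORM aggregation
 * data area.
 */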
4839 static void bnx2x_init_internal_common(struct bnx2x *bp)
4840 {
4841         int i;
4842
4843         if (bp->flags & TPA_ENABLE_FLAG) {
4844                 struct tstorm_eth_tpa_exist tpa = {0};
4845
4846                 tpa.tpa_exist = 1;
4847
4848                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4849                        ((u32 *)&tpa)[0]);
4850                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4851                        ((u32 *)&tpa)[1]);
4852         }
4853
4854         /* Zero this manually as its initialization is
4855            currently missing in the initTool */
4856         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4857                 REG_WR(bp, BAR_USTRORM_INTMEM +
4858                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4859 }
4860
4861 static void bnx2x_init_internal_port(struct bnx2x *bp)
4862 {
4863         int port = BP_PORT(bp);
4864
4865         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4866         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869 }
4870
4871 /* Calculates the sum of vn_min_rates.
4872    It's needed for further normalizing of the min_rates.
4873    Returns:
4874      sum of vn_min_rates.
4875        or
4876      0 - if all the min_rates are 0.
4877      In the latter case the fairness algorithm should be deactivated.
4878      If not all min_rates are zero then those that are zeroes will be set to 1.
4879  */
4880 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4881 {
4882         int all_zero = 1;
4883         int port = BP_PORT(bp);
4884         int vn;
4885
4886         bp->vn_weight_sum = 0;
4887         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4888                 int func = 2*vn + port;
4889                 u32 vn_cfg =
4890                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4891                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4892                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4893
4894                 /* Skip hidden vns */
4895                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4896                         continue;
4897
4898                 /* If min rate is zero - set it to 1 */
4899                 if (!vn_min_rate)
4900                         vn_min_rate = DEF_MIN_RATE;
4901                 else
4902                         all_zero = 0;
4903
4904                 bp->vn_weight_sum += vn_min_rate;
4905         }
4906
4907         /* ... only if all min rates were zero - disable fairness */
4908         if (all_zero)
4909                 bp->vn_weight_sum = 0;
4910 }
4911
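/* Per-function internal memory init: RSS configuration, initial rx
 * mode, zeroing of the per-client storm statistics, the fw_stats DMA
 * addresses, per-client CQ bases and aggregation limits, the dropless
 * flow control thresholds (E1H only) and, in multi-function mode, the
 * per-port rate-shaping/fairness context.
 */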
4912 static void bnx2x_init_internal_func(struct bnx2x *bp)
4913 {
4914         struct tstorm_eth_function_common_config tstorm_config = {0};
4915         struct stats_indication_flags stats_flags = {0};
4916         int port = BP_PORT(bp);
4917         int func = BP_FUNC(bp);
4918         int i, j;
4919         u32 offset;
4920         u16 max_agg_size;
4921
4922         if (is_multi(bp)) {
4923                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4924                 tstorm_config.rss_result_mask = MULTI_MASK;
4925         }
4926         if (IS_E1HMF(bp))
4927                 tstorm_config.config_flags |=
4928                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4929
4930         tstorm_config.leading_client_id = BP_L_ID(bp);
4931
4932         REG_WR(bp, BAR_TSTRORM_INTMEM +
4933                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4934                (*(u32 *)&tstorm_config));
4935
4936         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4937         bnx2x_set_storm_rx_mode(bp);
4938
4939         for_each_queue(bp, i) {
4940                 u8 cl_id = bp->fp[i].cl_id;
4941
4942                 /* reset xstorm per client statistics */
4943                 offset = BAR_XSTRORM_INTMEM +
4944                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4945                 for (j = 0;
4946                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4947                         REG_WR(bp, offset + j*4, 0);
4948
4949                 /* reset tstorm per client statistics */
4950                 offset = BAR_TSTRORM_INTMEM +
4951                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4952                 for (j = 0;
4953                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4954                         REG_WR(bp, offset + j*4, 0);
4955
4956                 /* reset ustorm per client statistics */
4957                 offset = BAR_USTRORM_INTMEM +
4958                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4959                 for (j = 0;
4960                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4961                         REG_WR(bp, offset + j*4, 0);
4962         }
4963
4964         /* Init statistics related context */
4965         stats_flags.collect_eth = 1;
4966
4967         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4968                ((u32 *)&stats_flags)[0]);
4969         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4970                ((u32 *)&stats_flags)[1]);
4971
4972         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4973                ((u32 *)&stats_flags)[0]);
4974         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4975                ((u32 *)&stats_flags)[1]);
4976
4977         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4978                ((u32 *)&stats_flags)[0]);
4979         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4980                ((u32 *)&stats_flags)[1]);
4981
4982         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4983                ((u32 *)&stats_flags)[0]);
4984         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4985                ((u32 *)&stats_flags)[1]);
4986
4987         REG_WR(bp, BAR_XSTRORM_INTMEM +
4988                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4989                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4990         REG_WR(bp, BAR_XSTRORM_INTMEM +
4991                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4992                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4993
4994         REG_WR(bp, BAR_TSTRORM_INTMEM +
4995                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4996                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4997         REG_WR(bp, BAR_TSTRORM_INTMEM +
4998                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4999                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5000
5001         REG_WR(bp, BAR_USTRORM_INTMEM +
5002                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5003                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5004         REG_WR(bp, BAR_USTRORM_INTMEM +
5005                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5006                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5007
5008         if (CHIP_IS_E1H(bp)) {
5009                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5010                         IS_E1HMF(bp));
5011                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5012                         IS_E1HMF(bp));
5013                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5014                         IS_E1HMF(bp));
5015                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5016                         IS_E1HMF(bp));
5017
5018                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5019                          bp->e1hov);
5020         }
5021
5022         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5023         max_agg_size =
5024                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5025                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5026                     (u32)0xffff);
5027         for_each_rx_queue(bp, i) {
5028                 struct bnx2x_fastpath *fp = &bp->fp[i];
5029
5030                 REG_WR(bp, BAR_USTRORM_INTMEM +
5031                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5032                        U64_LO(fp->rx_comp_mapping));
5033                 REG_WR(bp, BAR_USTRORM_INTMEM +
5034                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5035                        U64_HI(fp->rx_comp_mapping));
5036
5037                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5038                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5039                          max_agg_size);
5040         }
5041
5042         /* dropless flow control */
5043         if (CHIP_IS_E1H(bp)) {
5044                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5045
5046                 rx_pause.bd_thr_low = 250;
5047                 rx_pause.cqe_thr_low = 250;
5048                 rx_pause.cos = 1;
5049                 rx_pause.sge_thr_low = 0;
5050                 rx_pause.bd_thr_high = 350;
5051                 rx_pause.cqe_thr_high = 350;
5052                 rx_pause.sge_thr_high = 0;
5053
5054                 for_each_rx_queue(bp, i) {
5055                         struct bnx2x_fastpath *fp = &bp->fp[i];
5056
5057                         if (!fp->disable_tpa) {
5058                                 rx_pause.sge_thr_low = 150;
5059                                 rx_pause.sge_thr_high = 250;
5060                         }
5061
5062
5063                         offset = BAR_USTRORM_INTMEM +
5064                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5065                                                                    fp->cl_id);
5066                         for (j = 0;
5067                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5068                              j++)
5069                                 REG_WR(bp, offset + j*4,
5070                                        ((u32 *)&rx_pause)[j]);
5071                 }
5072         }
5073
5074         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5075
5076         /* Init rate shaping and fairness contexts */
5077         if (IS_E1HMF(bp)) {
5078                 int vn;
5079
5080                 /* During init there is no active link.
5081                    Until link is up, set the link rate to 10Gbps */
5082                 bp->link_vars.line_speed = SPEED_10000;
5083                 bnx2x_init_port_minmax(bp);
5084
5085                 bnx2x_calc_vn_weight_sum(bp);
5086
5087                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5088                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5089
5090                 /* Enable rate shaping and fairness */
5091                 bp->cmng.flags.cmng_enables =
5092                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5093                 if (bp->vn_weight_sum)
5094                         bp->cmng.flags.cmng_enables |=
5095                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5096                 else
5097                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5098                            "  fairness will be disabled\n");
5099         } else {
5100                 /* rate shaping and fairness are disabled */
5101                 DP(NETIF_MSG_IFUP,
5102                    "single function mode  minmax will be disabled\n");
5103         }
5104
5105
5106         /* Store it to internal memory */
5107         if (bp->port.pmf)
5108                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5109                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5110                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5111                                ((u32 *)(&bp->cmng))[i]);
5112 }
5113
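/* Dispatch on the MCP load response: the first driver on the chip
 * runs common, port and function init; the first on a port runs port
 * and function init; any later function only its own slice.  The
 * deliberate case fall-through implements that nesting.
 */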
5114 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5115 {
5116         switch (load_code) {
5117         case FW_MSG_CODE_DRV_LOAD_COMMON:
5118                 bnx2x_init_internal_common(bp);
5119                 /* no break */
5120
5121         case FW_MSG_CODE_DRV_LOAD_PORT:
5122                 bnx2x_init_internal_port(bp);
5123                 /* no break */
5124
5125         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5126                 bnx2x_init_internal_func(bp);
5127                 break;
5128
5129         default:
5130                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5131                 break;
5132         }
5133 }
5134
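/* Top-level NIC init: bring up the fastpath and default status
 * blocks, coalescing, rings, contexts and internal memories in order,
 * and only then clear intr_sem and enable interrupts.
 */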
5135 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5136 {
5137         int i;
5138
5139         for_each_queue(bp, i) {
5140                 struct bnx2x_fastpath *fp = &bp->fp[i];
5141
5142                 fp->bp = bp;
5143                 fp->state = BNX2X_FP_STATE_CLOSED;
5144                 fp->index = i;
5145                 fp->cl_id = BP_L_ID(bp) + i;
5146                 fp->sb_id = fp->cl_id;
5147                 DP(NETIF_MSG_IFUP,
5148                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5149                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5150                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5151                               fp->sb_id);
5152                 bnx2x_update_fpsb_idx(fp);
5153         }
5154
5155         /* ensure status block indices were read */
5156         rmb();
5157
5158
5159         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5160                           DEF_SB_ID);
5161         bnx2x_update_dsb_idx(bp);
5162         bnx2x_update_coalesce(bp);
5163         bnx2x_init_rx_rings(bp);
5164         bnx2x_init_tx_ring(bp);
5165         bnx2x_init_sp_ring(bp);
5166         bnx2x_init_context(bp);
5167         bnx2x_init_internal(bp, load_code);
5168         bnx2x_init_ind_table(bp);
5169         bnx2x_stats_init(bp);
5170
5171         /* At this point, we are ready for interrupts */
5172         atomic_set(&bp->intr_sem, 0);
5173
5174         /* flush all before enabling interrupts */
5175         mb();
5176         mmiowb();
5177
5178         bnx2x_int_enable(bp);
5179 }
5180
5181 /* end of nic init */
5182
5183 /*
5184  * gzip service functions
5185  */
5186
5187 static int bnx2x_gunzip_init(struct bnx2x *bp)
5188 {
5189         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5190                                               &bp->gunzip_mapping);
5191         if (bp->gunzip_buf == NULL)
5192                 goto gunzip_nomem1;
5193
5194         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5195         if (bp->strm == NULL)
5196                 goto gunzip_nomem2;
5197
5198         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5199                                       GFP_KERNEL);
5200         if (bp->strm->workspace == NULL)
5201                 goto gunzip_nomem3;
5202
5203         return 0;
5204
5205 gunzip_nomem3:
5206         kfree(bp->strm);
5207         bp->strm = NULL;
5208
5209 gunzip_nomem2:
5210         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5211                             bp->gunzip_mapping);
5212         bp->gunzip_buf = NULL;
5213
5214 gunzip_nomem1:
5215         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5216                " decompression\n", bp->dev->name);
5217         return -ENOMEM;
5218 }
5219
5220 static void bnx2x_gunzip_end(struct bnx2x *bp)
5221 {
5222         kfree(bp->strm->workspace);
5223
5224         kfree(bp->strm);
5225         bp->strm = NULL;
5226
5227         if (bp->gunzip_buf) {
5228                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5229                                     bp->gunzip_mapping);
5230                 bp->gunzip_buf = NULL;
5231         }
5232 }
5233
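/* Inflate a gzip-wrapped firmware section into gunzip_buf: check the
 * two magic bytes and the deflate method, skip the 10-byte header
 * (plus the NUL-terminated name field when FNAME is set), then run a
 * raw inflate (negative windowBits, i.e. no zlib wrapper).
 * gunzip_outlen is left in units of 32-bit words.
 */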
5234 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5235 {
5236         int n, rc;
5237
5238         /* check gzip header */
5239         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5240                 return -EINVAL;
5241
5242         n = 10;
5243
5244 #define FNAME                           0x8
5245
5246         if (zbuf[3] & FNAME)
5247                 while ((zbuf[n++] != 0) && (n < len));
5248
5249         bp->strm->next_in = zbuf + n;
5250         bp->strm->avail_in = len - n;
5251         bp->strm->next_out = bp->gunzip_buf;
5252         bp->strm->avail_out = FW_BUF_SIZE;
5253
5254         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5255         if (rc != Z_OK)
5256                 return rc;
5257
5258         rc = zlib_inflate(bp->strm, Z_FINISH);
5259         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5260                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5261                        bp->dev->name, bp->strm->msg);
5262
5263         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5264         if (bp->gunzip_outlen & 0x3)
5265                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5266                                     " gunzip_outlen (%d) not aligned\n",
5267                        bp->dev->name, bp->gunzip_outlen);
5268         bp->gunzip_outlen >>= 2;
5269
5270         zlib_inflateEnd(bp->strm);
5271
5272         if (rc == Z_STREAM_END)
5273                 return 0;
5274
5275         return rc;
5276 }
5277
5278 /* nic load/unload */
5279
5280 /*
5281  * General service functions
5282  */
5283
5284 /* send a NIG loopback debug packet */
5285 static void bnx2x_lb_pckt(struct bnx2x *bp)
5286 {
5287         u32 wb_write[3];
5288
5289         /* Ethernet source and destination addresses */
5290         wb_write[0] = 0x55555555;
5291         wb_write[1] = 0x55555555;
5292         wb_write[2] = 0x20;             /* SOP */
5293         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5294
5295         /* NON-IP protocol */
5296         wb_write[0] = 0x09000000;
5297         wb_write[1] = 0x55555555;
5298         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5299         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5300 }
5301
5302 /* some of the internal memories
5303  * are not directly readable from the driver;
5304  * to test them we send debug packets
5305  */
5306 static int bnx2x_int_mem_test(struct bnx2x *bp)
5307 {
5308         int factor;
5309         int count, i;
5310         u32 val = 0;
5311
5312         if (CHIP_REV_IS_FPGA(bp))
5313                 factor = 120;
5314         else if (CHIP_REV_IS_EMUL(bp))
5315                 factor = 200;
5316         else
5317                 factor = 1;
5318
5319         DP(NETIF_MSG_HW, "start part1\n");
5320
5321         /* Disable inputs of parser neighbor blocks */
5322         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5323         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5324         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5325         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5326
5327         /*  Write 0 to parser credits for CFC search request */
5328         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5329
5330         /* send Ethernet packet */
5331         bnx2x_lb_pckt(bp);
5332
5333         /* TODO: do I need to reset the NIG statistics? */
5334         /* Wait until NIG register shows 1 packet of size 0x10 */
5335         count = 1000 * factor;
5336         while (count) {
5337
5338                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5339                 val = *bnx2x_sp(bp, wb_data[0]);
5340                 if (val == 0x10)
5341                         break;
5342
5343                 msleep(10);
5344                 count--;
5345         }
5346         if (val != 0x10) {
5347                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5348                 return -1;
5349         }
5350
5351         /* Wait until PRS register shows 1 packet */
5352         count = 1000 * factor;
5353         while (count) {
5354                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5355                 if (val == 1)
5356                         break;
5357
5358                 msleep(10);
5359                 count--;
5360         }
5361         if (val != 0x1) {
5362                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5363                 return -2;
5364         }
5365
5366         /* Reset and init BRB, PRS */
5367         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5368         msleep(50);
5369         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5370         msleep(50);
5371         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5372         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5373
5374         DP(NETIF_MSG_HW, "part2\n");
5375
5376         /* Disable inputs of parser neighbor blocks */
5377         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5378         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5379         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5380         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5381
5382         /* Write 0 to parser credits for CFC search request */
5383         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5384
5385         /* send 10 Ethernet packets */
5386         for (i = 0; i < 10; i++)
5387                 bnx2x_lb_pckt(bp);
5388
5389         /* Wait until NIG register shows 10 + 1
5390            packets of size 11*0x10 = 0xb0 */
5391         count = 1000 * factor;
5392         while (count) {
5393
5394                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5395                 val = *bnx2x_sp(bp, wb_data[0]);
5396                 if (val == 0xb0)
5397                         break;
5398
5399                 msleep(10);
5400                 count--;
5401         }
5402         if (val != 0xb0) {
5403                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5404                 return -3;
5405         }
5406
5407         /* Wait until PRS register shows 2 packets */
5408         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5409         if (val != 2)
5410                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5411
5412         /* Write 1 to parser credits for CFC search request */
5413         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5414
5415         /* Wait until PRS register shows 3 packets */
5416         msleep(10 * factor);
5417         /* and read the PRS packet counter */
5418         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5419         if (val != 3)
5420                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5421
5422         /* clear NIG EOP FIFO */
5423         for (i = 0; i < 11; i++)
5424                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5425         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5426         if (val != 1) {
5427                 BNX2X_ERR("clear of NIG failed\n");
5428                 return -4;
5429         }
5430
5431         /* Reset and init BRB, PRS, NIG */
5432         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5433         msleep(50);
5434         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5435         msleep(50);
5436         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5437         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5438 #ifndef BCM_ISCSI
5439         /* set NIC mode */
5440         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5441 #endif
5442
5443         /* Enable inputs of parser neighbor blocks */
5444         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5445         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5446         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5447         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5448
5449         DP(NETIF_MSG_HW, "done\n");
5450
5451         return 0; /* OK */
5452 }
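
/* The NIG/PRS waits above share one shape: poll a register until it
 * reads an expected value, sleeping 10ms between reads.  A minimal
 * helper of that shape could look like the sketch below (illustration
 * only -- the driver keeps these loops open-coded and uses reg_poll()
 * for the CFC init checks further down):
 */
static u32 __maybe_unused bnx2x_poll_reg_val(struct bnx2x *bp, u32 reg,
                                             u32 expected, int count)
{
        u32 val = 0;

        while (count--) {
                val = REG_RD(bp, reg);
                if (val == expected)
                        break;
                msleep(10);
        }
        return val;     /* caller compares against expected to detect timeout */
}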
5453
5454 static void enable_blocks_attention(struct bnx2x *bp)
5455 {
5456         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5457         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5458         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5459         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5460         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5461         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5462         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5463         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5464         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5465 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5466 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5467         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5468         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5469         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5470 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5471 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5472         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5473         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5474         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5475         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5476 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5477 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5478         if (CHIP_REV_IS_FPGA(bp))
5479                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5480         else
5481                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5482         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5483         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5484         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5485 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5486 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5487         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5488         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5489 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5490         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5491 }
5492
5493
5494 static void bnx2x_reset_common(struct bnx2x *bp)
5495 {
5496         /* reset_common */
5497         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5498                0xd3ffff7f);
5499         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5500 }
5501
5502 static int bnx2x_init_common(struct bnx2x *bp)
5503 {
5504         u32 val, i;
5505
5506         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5507
5508         bnx2x_reset_common(bp);
5509         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5510         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5511
5512         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5513         if (CHIP_IS_E1H(bp))
5514                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5515
5516         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5517         msleep(30);
5518         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5519
5520         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5521         if (CHIP_IS_E1(bp)) {
5522                 /* enable HW interrupt from PXP on USDM overflow
5523                    bit 16 on INT_MASK_0 */
5524                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5525         }
5526
5527         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5528         bnx2x_init_pxp(bp);
5529
5530 #ifdef __BIG_ENDIAN
5531         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5532         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5533         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5534         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5535         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5536         /* make sure this value is 0 */
5537         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5538
5539 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5540         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5541         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5542         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5543         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5544 #endif
5545
5546         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5547 #ifdef BCM_ISCSI
5548         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5549         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5550         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5551 #endif
5552
5553         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5554                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5555
5556         /* let the HW do its magic ... */
5557         msleep(100);
5558         /* finish PXP init */
5559         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5560         if (val != 1) {
5561                 BNX2X_ERR("PXP2 CFG failed\n");
5562                 return -EBUSY;
5563         }
5564         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5565         if (val != 1) {
5566                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5567                 return -EBUSY;
5568         }
5569
5570         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5571         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5572
5573         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5574
5575         /* clean the DMAE memory */
5576         bp->dmae_ready = 1;
5577         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5578
5579         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5580         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5581         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5582         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5583
5584         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5585         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5586         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5587         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5588
5589         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5590         /* soft reset pulse */
5591         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5592         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5593
5594 #ifdef BCM_ISCSI
5595         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5596 #endif
5597
5598         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5599         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5600         if (!CHIP_REV_IS_SLOW(bp)) {
5601                 /* enable hw interrupt from doorbell Q */
5602                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5603         }
5604
5605         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5606         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5607         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5608         /* set NIC mode */
5609         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5610         if (CHIP_IS_E1H(bp))
5611                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5612
5613         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5614         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5615         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5616         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5617
5618         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5619         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5620         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5621         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5622
5623         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5624         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5625         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5626         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5627
5628         /* sync semi rtc */
5629         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5630                0x80000000);
5631         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5632                0x80000000);
5633
5634         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5635         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5636         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5637
5638         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5639         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5640                 REG_WR(bp, i, 0xc0cac01a);
5641                 /* TODO: replace with something meaningful */
5642         }
5643         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5644         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5645
5646         if (sizeof(union cdu_context) != 1024)
5647                 /* we currently assume that a context is 1024 bytes */
5648                 printk(KERN_ALERT PFX "please adjust the size of"
5649                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5650
5651         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5652         val = (4 << 24) + (0 << 12) + 1024;
5653         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5654         if (CHIP_IS_E1(bp)) {
5655         /* !!! fix pxp client credit until excel update */
5656                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5657                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5658         }
5659
5660         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5661         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5662         /* enable context validation interrupt from CFC */
5663         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5664
5665         /* set the thresholds to prevent CFC/CDU race */
5666         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5667
5668         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5669         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5670
5671         /* PXPCS COMMON comes here */
5672         /* Reset PCIE errors for debug */
5673         REG_WR(bp, 0x2814, 0xffffffff);
5674         REG_WR(bp, 0x3820, 0xffffffff);
5675
5676         /* EMAC0 COMMON comes here */
5677         /* EMAC1 COMMON comes here */
5678         /* DBU COMMON comes here */
5679         /* DBG COMMON comes here */
5680
5681         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5682         if (CHIP_IS_E1H(bp)) {
5683                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5684                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5685         }
5686
5687         if (CHIP_REV_IS_SLOW(bp))
5688                 msleep(200);
5689
5690         /* finish CFC init */
5691         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5692         if (val != 1) {
5693                 BNX2X_ERR("CFC LL_INIT failed\n");
5694                 return -EBUSY;
5695         }
5696         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5697         if (val != 1) {
5698                 BNX2X_ERR("CFC AC_INIT failed\n");
5699                 return -EBUSY;
5700         }
5701         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5702         if (val != 1) {
5703                 BNX2X_ERR("CFC CAM_INIT failed\n");
5704                 return -EBUSY;
5705         }
5706         REG_WR(bp, CFC_REG_DEBUG0, 0);
5707
5708         /* read NIG statistic
5709            to see if this is our first up since powerup */
5710         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5711         val = *bnx2x_sp(bp, wb_data[0]);
5712
5713         /* do internal memory self test */
5714         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5715                 BNX2X_ERR("internal mem self test failed\n");
5716                 return -EBUSY;
5717         }
5718
5719         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5720         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5721         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5722         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5723                 bp->port.need_hw_lock = 1;
5724                 break;
5725
5726         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5727                 /* Fan failure is indicated by SPIO 5 */
5728                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5729                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5730
5731                 /* set to active low mode */
5732                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5733                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5734                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5735                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5736
5737                 /* enable interrupt to signal the IGU */
5738                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5739                 val |= (1 << MISC_REGISTERS_SPIO_5);
5740                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5741                 break;
5742
5743         default:
5744                 break;
5745         }
5746
5747         /* clear PXP2 attentions */
5748         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5749
5750         enable_blocks_attention(bp);
5751
5752         if (!BP_NOMCP(bp)) {
5753                 bnx2x_acquire_phy_lock(bp);
5754                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5755                 bnx2x_release_phy_lock(bp);
5756         } else
5757                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5758
5759         return 0;
5760 }
5761
5762 static int bnx2x_init_port(struct bnx2x *bp)
5763 {
5764         int port = BP_PORT(bp);
5765         u32 low, high;
5766         u32 val;
#ifdef BCM_ISCSI
        /* assumed declarations: the BCM_ISCSI blocks below use func, i
         * and wb_write but never declare them in this function; the
         * initial ILT line (0 for port 0, 384 for port 1) is inferred
         * from the "Port0  1 / Port1  385" comments in the first block */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5767
5768         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5769
5770         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5771
5772         /* Port PXP comes here */
5773         /* Port PXP2 comes here */
5774 #ifdef BCM_ISCSI
5775         /* Port0  1
5776          * Port1  385 */
5777         i++;
5778         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5779         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5780         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5781         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5782
5783         /* Port0  2
5784          * Port1  386 */
5785         i++;
5786         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5787         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5788         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5789         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5790
5791         /* Port0  3
5792          * Port1  387 */
5793         i++;
5794         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5795         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5796         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5797         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5798 #endif
5799         /* Port CMs come here */
5800         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5801                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5802
5803         /* Port QM comes here */
5804 #ifdef BCM_ISCSI
5805         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5806         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5807
5808         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5809                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5810 #endif
5811         /* Port DQ comes here */
5812
5813         bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5814                              (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5815         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5816                 /* no pause for emulation and FPGA */
5817                 low = 0;
5818                 high = 513;
5819         } else {
5820                 if (IS_E1HMF(bp))
5821                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5822                 else if (bp->dev->mtu > 4096) {
5823                         if (bp->flags & ONE_PORT_FLAG)
5824                                 low = 160;
5825                         else {
5826                                 val = bp->dev->mtu;
5827                                 /* (24*1024 + val*4)/256 */
5828                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5829                         }
5830                 } else
5831                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5832                 high = low + 56;        /* 14*1024/256 */
5833         }
5834         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5835         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
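
        /* Worked example of the formula above (reading aid only): for
         * mtu = 9000, low = 96 + 9000/64 + 1 = 237 blocks, which is
         * (24*1024 + 9000*4)/256 rounded up, and high = 237 + 56 = 293;
         * a BRB block is 256 bytes, so the 56-block gap is 14KB.
         */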
5836
5837
5838         /* Port PRS comes here */
5839         /* Port TSDM comes here */
5840         /* Port CSDM comes here */
5841         /* Port USDM comes here */
5842         /* Port XSDM comes here */
5843
5844         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5845                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5846         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5847                              port ? USEM_PORT1_END : USEM_PORT0_END);
5848         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5849                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5850         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5851                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5852
5853         /* Port UPB comes here */
5854         /* Port XPB comes here */
5855
5856         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5857                              port ? PBF_PORT1_END : PBF_PORT0_END);
5858
5859         /* configure PBF to work without PAUSE mtu 9000 */
5860         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5861
5862         /* update threshold */
5863         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5864         /* update init credit */
5865         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5866
5867         /* probe changes */
5868         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5869         msleep(5);
5870         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5871
5872 #ifdef BCM_ISCSI
5873         /* tell the searcher where the T2 table is */
5874         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5875
5876         wb_write[0] = U64_LO(bp->t2_mapping);
5877         wb_write[1] = U64_HI(bp->t2_mapping);
5878         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5879         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5880         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5881         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5882
5883         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5884         /* Port SRCH comes here */
5885 #endif
5886         /* Port CDU comes here */
5887         /* Port CFC comes here */
5888
5889         if (CHIP_IS_E1(bp)) {
5890                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5891                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5892         }
5893         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5894                              port ? HC_PORT1_END : HC_PORT0_END);
5895
5896         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5897                                     MISC_AEU_PORT0_START,
5898                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5899         /* init aeu_mask_attn_func_0/1:
5900          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5901          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5902          *             bits 4-7 are used for "per vn group attention" */
5903         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5904                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5905
5906         /* Port PXPCS comes here */
5907         /* Port EMAC0 comes here */
5908         /* Port EMAC1 comes here */
5909         /* Port DBU comes here */
5910         /* Port DBG comes here */
5911
5912         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5913                              port ? NIG_PORT1_END : NIG_PORT0_END);
5914
5915         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5916
5917         if (CHIP_IS_E1H(bp)) {
5918                 /* 0x2 disable e1hov, 0x1 enable */
5919                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5920                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5921
5922                 /* support pause requests from USDM, TSDM and BRB */
5923                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5924
5926                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5927                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5928                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5930         }
5931
5932         /* Port MCP comes here */
5933         /* Port DMAE comes here */
5934
5935         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5936         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5937                 {
5938                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5939
5940                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5941                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5942
5943                 /* The GPIO should be swapped if the swap register is
5944                    set and active */
5945                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5946                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5947
5948                 /* Select function upon port-swap configuration */
5949                 if (port == 0) {
5950                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5951                         aeu_gpio_mask = (swap_val && swap_override) ?
5952                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5953                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5954                 } else {
5955                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5956                         aeu_gpio_mask = (swap_val && swap_override) ?
5957                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5958                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5959                 }
5960                 val = REG_RD(bp, offset);
5961                 /* add GPIO3 to group */
5962                 val |= aeu_gpio_mask;
5963                 REG_WR(bp, offset, val);
5964                 }
5965                 break;
5966
5967         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5968                 /* add SPIO 5 to group 0 */
5969                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5970                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5971                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5972                 break;
5973
5974         default:
5975                 break;
5976         }
5977
5978         bnx2x__link_reset(bp);
5979
5980         return 0;
5981 }
5982
5983 #define ILT_PER_FUNC            (768/2)
5984 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5985 /* the phys address is shifted right 12 bits and has a
5986    1=valid bit added to the 53rd bit;
5987    then, since this is a wide register(TM),
5988    we split it into two 32 bit writes
5989  */
5990 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5991 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5992 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5993 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
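
/* Worked example of the split (illustration only): for the 64-bit
 * physical address 0x0000123456789000, the 4K-page number is
 * 0x123456789, so ONCHIP_ADDR1() yields 0x23456789 (its low 32 bits)
 * and ONCHIP_ADDR2() yields 0x100001 -- the remaining high bits of
 * the page number with the valid bit (1 << 20, i.e. bit 52 of the
 * combined value) set.
 */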
5994
5995 #define CNIC_ILT_LINES          0
5996
5997 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5998 {
5999         int reg;
6000
6001         if (CHIP_IS_E1H(bp))
6002                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6003         else /* E1 */
6004                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6005
6006         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6007 }
6008
6009 static int bnx2x_init_func(struct bnx2x *bp)
6010 {
6011         int port = BP_PORT(bp);
6012         int func = BP_FUNC(bp);
6013         u32 addr, val;
6014         int i;
6015
6016         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6017
6018         /* set MSI reconfigure capability */
6019         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6020         val = REG_RD(bp, addr);
6021         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6022         REG_WR(bp, addr, val);
6023
6024         i = FUNC_ILT_BASE(func);
6025
6026         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6027         if (CHIP_IS_E1H(bp)) {
6028                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6029                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6030         } else /* E1 */
6031                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6032                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6033
6034
6035         if (CHIP_IS_E1H(bp)) {
6036                 for (i = 0; i < 9; i++)
6037                         bnx2x_init_block(bp,
6038                                          cm_start[func][i], cm_end[func][i]);
6039
6040                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6041                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6042         }
6043
6044         /* HC init per function */
6045         if (CHIP_IS_E1H(bp)) {
6046                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6047
6048                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6049                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6050         }
6051         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6052
6053         /* Reset PCIE errors for debug */
6054         REG_WR(bp, 0x2114, 0xffffffff);
6055         REG_WR(bp, 0x2120, 0xffffffff);
6056
6057         return 0;
6058 }
6059
6060 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6061 {
6062         int i, rc = 0;
6063
6064         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6065            BP_FUNC(bp), load_code);
6066
6067         bp->dmae_ready = 0;
6068         mutex_init(&bp->dmae_mutex);
6069         bnx2x_gunzip_init(bp);
6070
6071         switch (load_code) {
6072         case FW_MSG_CODE_DRV_LOAD_COMMON:
6073                 rc = bnx2x_init_common(bp);
6074                 if (rc)
6075                         goto init_hw_err;
6076                 /* no break */
6077
6078         case FW_MSG_CODE_DRV_LOAD_PORT:
6079                 bp->dmae_ready = 1;
6080                 rc = bnx2x_init_port(bp);
6081                 if (rc)
6082                         goto init_hw_err;
6083                 /* no break */
6084
6085         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6086                 bp->dmae_ready = 1;
6087                 rc = bnx2x_init_func(bp);
6088                 if (rc)
6089                         goto init_hw_err;
6090                 break;
6091
6092         default:
6093                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6094                 break;
6095         }
6096
6097         if (!BP_NOMCP(bp)) {
6098                 int func = BP_FUNC(bp);
6099
6100                 bp->fw_drv_pulse_wr_seq =
6101                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6102                                  DRV_PULSE_SEQ_MASK);
6103                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6104                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6105                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6106         } else
6107                 bp->func_stx = 0;
6108
6109         /* this needs to be done before gunzip end */
6110         bnx2x_zero_def_sb(bp);
6111         for_each_queue(bp, i)
6112                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6113
6114 init_hw_err:
6115         bnx2x_gunzip_end(bp);
6116
6117         return rc;
6118 }
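
/* Note on the switch in bnx2x_init_hw() above: the missing breaks are
 * deliberate (hence the "no break" comments).  A COMMON load performs
 * the common, port and function stages; a PORT load performs the port
 * and function stages; only a FUNCTION load runs the function stage
 * alone, so only that case ends with a break.
 */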
6119
6120 /* send the MCP a request, block until there is a reply */
6121 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6122 {
6123         int func = BP_FUNC(bp);
6124         u32 seq = ++bp->fw_seq;
6125         u32 rc = 0;
6126         u32 cnt = 1;
6127         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6128
6129         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6130         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6131
6132         do {
6133                 /* let the FW do its magic ... */
6134                 msleep(delay);
6135
6136                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6137
6138                 /* Give the FW up to 2 seconds (200 * 10ms; 20s on slow chips) */
6139         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6140
6141         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6142            cnt*delay, rc, seq);
6143
6144         /* is this a reply to our command? */
6145         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6146                 rc &= FW_MSG_CODE_MASK;
6147
6148         } else {
6149                 /* FW BUG! */
6150                 BNX2X_ERR("FW failed to respond!\n");
6151                 bnx2x_fw_dump(bp);
6152                 rc = 0;
6153         }
6154
6155         return rc;
6156 }
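
/* Typical use of bnx2x_fw_command() (a sketch; the load/unload paths
 * elsewhere in this file do exactly this with the DRV_MSG_CODE_*
 * request values defined in the HSI):
 *
 *      u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *      if (!load_code)
 *              return -EBUSY;  (MCP never echoed our sequence number)
 */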
6157
6158 static void bnx2x_free_mem(struct bnx2x *bp)
6159 {
6160
6161 #define BNX2X_PCI_FREE(x, y, size) \
6162         do { \
6163                 if (x) { \
6164                         pci_free_consistent(bp->pdev, size, x, y); \
6165                         x = NULL; \
6166                         y = 0; \
6167                 } \
6168         } while (0)
6169
6170 #define BNX2X_FREE(x) \
6171         do { \
6172                 if (x) { \
6173                         vfree(x); \
6174                         x = NULL; \
6175                 } \
6176         } while (0)
6177
6178         int i;
6179
6180         /* fastpath */
6181         /* Common */
6182         for_each_queue(bp, i) {
6183
6184                 /* status blocks */
6185                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6186                                bnx2x_fp(bp, i, status_blk_mapping),
6187                                sizeof(struct host_status_block) +
6188                                sizeof(struct eth_tx_db_data));
6189         }
6190         /* Rx */
6191         for_each_rx_queue(bp, i) {
6192
6193                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6194                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6195                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6196                                bnx2x_fp(bp, i, rx_desc_mapping),
6197                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6198
6199                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6200                                bnx2x_fp(bp, i, rx_comp_mapping),
6201                                sizeof(struct eth_fast_path_rx_cqe) *
6202                                NUM_RCQ_BD);
6203
6204                 /* SGE ring */
6205                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6206                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6207                                bnx2x_fp(bp, i, rx_sge_mapping),
6208                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6209         }
6210         /* Tx */
6211         for_each_tx_queue(bp, i) {
6212
6213                 /* fastpath tx rings: tx_buf tx_desc */
6214                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6215                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6216                                bnx2x_fp(bp, i, tx_desc_mapping),
6217                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6218         }
6219         /* end of fastpath */
6220
6221         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6222                        sizeof(struct host_def_status_block));
6223
6224         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6225                        sizeof(struct bnx2x_slowpath));
6226
6227 #ifdef BCM_ISCSI
6228         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6229         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6230         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6231         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6232 #endif
6233         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6234
6235 #undef BNX2X_PCI_FREE
6236 #undef BNX2X_FREE
6237 }
6238
6239 static int bnx2x_alloc_mem(struct bnx2x *bp)
6240 {
6241
6242 #define BNX2X_PCI_ALLOC(x, y, size) \
6243         do { \
6244                 x = pci_alloc_consistent(bp->pdev, size, y); \
6245                 if (x == NULL) \
6246                         goto alloc_mem_err; \
6247                 memset(x, 0, size); \
6248         } while (0)
6249
6250 #define BNX2X_ALLOC(x, size) \
6251         do { \
6252                 x = vmalloc(size); \
6253                 if (x == NULL) \
6254                         goto alloc_mem_err; \
6255                 memset(x, 0, size); \
6256         } while (0)
6257
6258         int i;
6259
6260         /* fastpath */
6261         /* Common */
6262         for_each_queue(bp, i) {
6263                 bnx2x_fp(bp, i, bp) = bp;
6264
6265                 /* status blocks */
6266                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6267                                 &bnx2x_fp(bp, i, status_blk_mapping),
6268                                 sizeof(struct host_status_block) +
6269                                 sizeof(struct eth_tx_db_data));
6270         }
6271         /* Rx */
6272         for_each_rx_queue(bp, i) {
6273
6274                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6275                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6276                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6277                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6278                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6279                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6280
6281                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6282                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6283                                 sizeof(struct eth_fast_path_rx_cqe) *
6284                                 NUM_RCQ_BD);
6285
6286                 /* SGE ring */
6287                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6288                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6289                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6290                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6291                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6292         }
6293         /* Tx */
6294         for_each_tx_queue(bp, i) {
6295
6296                 bnx2x_fp(bp, i, hw_tx_prods) =
6297                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6298
6299                 bnx2x_fp(bp, i, tx_prods_mapping) =
6300                                 bnx2x_fp(bp, i, status_blk_mapping) +
6301                                 sizeof(struct host_status_block);
6302
6303                 /* fastpath tx rings: tx_buf tx_desc */
6304                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6305                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6306                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6307                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6308                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6309         }
6310         /* end of fastpath */
6311
6312         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6313                         sizeof(struct host_def_status_block));
6314
6315         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6316                         sizeof(struct bnx2x_slowpath));
6317
6318 #ifdef BCM_ISCSI
6319         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6320
6321         /* Initialize T1 */
6322         for (i = 0; i < 64*1024; i += 64) {
6323                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6324                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6325         }
6326
6327         /* allocate the searcher T2 table;
6328            we allocate 1/4 of the T1 allocation for T2
6329            (which is not entered into the ILT) */
6330         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6331
6332         /* Initialize T2 */
6333         for (i = 0; i < 16*1024; i += 64)
6334                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6335
6336         /* now fixup the last line in the block to point to the next block */
6337         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6338
6339         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6340         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6341
6342         /* QM queues (128*MAX_CONN) */
6343         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6344 #endif
6345
6346         /* Slow path ring */
6347         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6348
6349         return 0;
6350
6351 alloc_mem_err:
6352         bnx2x_free_mem(bp);
6353         return -ENOMEM;
6354
6355 #undef BNX2X_PCI_ALLOC
6356 #undef BNX2X_ALLOC
6357 }
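
/* Note on the error path above: alloc_mem_err unwinds through
 * bnx2x_free_mem(), which is safe on a partially populated state
 * because both free macros test each pointer before releasing it and
 * NULL it afterwards, so nothing is freed twice.
 */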
6358
6359 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6360 {
6361         int i;
6362
6363         for_each_tx_queue(bp, i) {
6364                 struct bnx2x_fastpath *fp = &bp->fp[i];
6365
6366                 u16 bd_cons = fp->tx_bd_cons;
6367                 u16 sw_prod = fp->tx_pkt_prod;
6368                 u16 sw_cons = fp->tx_pkt_cons;
6369
6370                 while (sw_cons != sw_prod) {
6371                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6372                         sw_cons++;
6373                 }
6374         }
6375 }
6376
6377 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6378 {
6379         int i, j;
6380
6381         for_each_rx_queue(bp, j) {
6382                 struct bnx2x_fastpath *fp = &bp->fp[j];
6383
6384                 for (i = 0; i < NUM_RX_BD; i++) {
6385                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6386                         struct sk_buff *skb = rx_buf->skb;
6387
6388                         if (skb == NULL)
6389                                 continue;
6390
6391                         pci_unmap_single(bp->pdev,
6392                                          pci_unmap_addr(rx_buf, mapping),
6393                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6394
6395                         rx_buf->skb = NULL;
6396                         dev_kfree_skb(skb);
6397                 }
6398                 if (!fp->disable_tpa)
6399                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6400                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6401                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6402         }
6403 }
6404
6405 static void bnx2x_free_skbs(struct bnx2x *bp)
6406 {
6407         bnx2x_free_tx_skbs(bp);
6408         bnx2x_free_rx_skbs(bp);
6409 }
6410
6411 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6412 {
6413         int i, offset = 1;
6414
6415         free_irq(bp->msix_table[0].vector, bp->dev);
6416         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6417            bp->msix_table[0].vector);
6418
6419         for_each_queue(bp, i) {
6420                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6421                    "state %x\n", i, bp->msix_table[i + offset].vector,
6422                    bnx2x_fp(bp, i, state));
6423
6424                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6425         }
6426 }
6427
6428 static void bnx2x_free_irq(struct bnx2x *bp)
6429 {
6430         if (bp->flags & USING_MSIX_FLAG) {
6431                 bnx2x_free_msix_irqs(bp);
6432                 pci_disable_msix(bp->pdev);
6433                 bp->flags &= ~USING_MSIX_FLAG;
6434
6435         } else if (bp->flags & USING_MSI_FLAG) {
6436                 free_irq(bp->pdev->irq, bp->dev);
6437                 pci_disable_msi(bp->pdev);
6438                 bp->flags &= ~USING_MSI_FLAG;
6439
6440         } else
6441                 free_irq(bp->pdev->irq, bp->dev);
6442 }
6443
6444 static int bnx2x_enable_msix(struct bnx2x *bp)
6445 {
6446         int i, rc, offset = 1;
6447         int igu_vec = 0;
6448
6449         bp->msix_table[0].entry = igu_vec;
6450         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6451
6452         for_each_queue(bp, i) {
6453                 igu_vec = BP_L_ID(bp) + offset + i;
6454                 bp->msix_table[i + offset].entry = igu_vec;
6455                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6456                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6457         }
6458
6459         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6460                              BNX2X_NUM_QUEUES(bp) + offset);
6461         if (rc) {
6462                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6463                 return rc;
6464         }
6465
6466         bp->flags |= USING_MSIX_FLAG;
6467
6468         return 0;
6469 }
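
/* Vector layout assumed here and in bnx2x_req_msix_irqs() below
 * (offset = 1 in both): entry 0 is the slowpath/default status block
 * interrupt, and entries 1..BNX2X_NUM_QUEUES() map one fastpath queue
 * each to an IGU vector derived from BP_L_ID().
 */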
6470
6471 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6472 {
6473         int i, rc, offset = 1;
6474
6475         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6476                          bp->dev->name, bp->dev);
6477         if (rc) {
6478                 BNX2X_ERR("request sp irq failed\n");
6479                 return -EBUSY;
6480         }
6481
6482         for_each_queue(bp, i) {
6483                 struct bnx2x_fastpath *fp = &bp->fp[i];
6484
6485                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6486                 rc = request_irq(bp->msix_table[i + offset].vector,
6487                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6488                 if (rc) {
6489                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6490                         bnx2x_free_msix_irqs(bp);
6491                         return -EBUSY;
6492                 }
6493
6494                 fp->state = BNX2X_FP_STATE_IRQ;
6495         }
6496
6497         i = BNX2X_NUM_QUEUES(bp);
6498         if (is_multi(bp))
6499                 printk(KERN_INFO PFX
6500                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6501                        bp->dev->name, bp->msix_table[0].vector,
6502                        bp->msix_table[offset].vector,
6503                        bp->msix_table[offset + i - 1].vector);
6504         else
6505                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6506                        bp->dev->name, bp->msix_table[0].vector,
6507                        bp->msix_table[offset + i - 1].vector);
6508
6509         return 0;
6510 }
6511
6512 static int bnx2x_enable_msi(struct bnx2x *bp)
6513 {
6514         int rc;
6515
6516         rc = pci_enable_msi(bp->pdev);
6517         if (rc) {
6518                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6519                 return -1;
6520         }
6521         bp->flags |= USING_MSI_FLAG;
6522
6523         return 0;
6524 }
6525
6526 static int bnx2x_req_irq(struct bnx2x *bp)
6527 {
6528         unsigned long flags;
6529         int rc;
6530
6531         if (bp->flags & USING_MSI_FLAG)
6532                 flags = 0;
6533         else
6534                 flags = IRQF_SHARED;
6535
6536         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6537                          bp->dev->name, bp->dev);
6538         if (!rc)
6539                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6540
6541         return rc;
6542 }
6543
6544 static void bnx2x_napi_enable(struct bnx2x *bp)
6545 {
6546         int i;
6547
6548         for_each_rx_queue(bp, i)
6549                 napi_enable(&bnx2x_fp(bp, i, napi));
6550 }
6551
6552 static void bnx2x_napi_disable(struct bnx2x *bp)
6553 {
6554         int i;
6555
6556         for_each_rx_queue(bp, i)
6557                 napi_disable(&bnx2x_fp(bp, i, napi));
6558 }
6559
6560 static void bnx2x_netif_start(struct bnx2x *bp)
6561 {
6562         if (atomic_dec_and_test(&bp->intr_sem)) {
6563                 if (netif_running(bp->dev)) {
6564                         bnx2x_napi_enable(bp);
6565                         bnx2x_int_enable(bp);
6566                         if (bp->state == BNX2X_STATE_OPEN)
6567                                 netif_tx_wake_all_queues(bp->dev);
6568                 }
6569         }
6570 }
6571
6572 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6573 {
6574         bnx2x_int_disable_sync(bp, disable_hw);
6575         bnx2x_napi_disable(bp);
6576         if (netif_running(bp->dev)) {
6577                 netif_tx_disable(bp->dev);
6578                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6579         }
6580 }
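
/* Pairing note: bnx2x_netif_start() only re-enables NAPI and hardware
 * interrupts once atomic_dec_and_test() brings intr_sem back to zero
 * (the matching increment presumably happens in
 * bnx2x_int_disable_sync(), defined earlier in this file), so stop and
 * start calls must be strictly balanced.
 */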
6581
6582 /*
6583  * Init service functions
6584  */
6585
6586 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6587 {
6588         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6589         int port = BP_PORT(bp);
6590
6591         /* CAM allocation
6592          * unicasts 0-31:port0 32-63:port1
6593          * multicast 64-127:port0 128-191:port1
6594          */
6595         config->hdr.length = 2;
6596         config->hdr.offset = port ? 32 : 0;
6597         config->hdr.client_id = bp->fp->cl_id;
6598         config->hdr.reserved1 = 0;
6599
6600         /* primary MAC */
6601         config->config_table[0].cam_entry.msb_mac_addr =
6602                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6603         config->config_table[0].cam_entry.middle_mac_addr =
6604                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6605         config->config_table[0].cam_entry.lsb_mac_addr =
6606                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6607         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6608         if (set)
6609                 config->config_table[0].target_table_entry.flags = 0;
6610         else
6611                 CAM_INVALIDATE(config->config_table[0]);
6612         config->config_table[0].target_table_entry.client_id = 0;
6613         config->config_table[0].target_table_entry.vlan_id = 0;
6614
6615         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6616            (set ? "setting" : "clearing"),
6617            config->config_table[0].cam_entry.msb_mac_addr,
6618            config->config_table[0].cam_entry.middle_mac_addr,
6619            config->config_table[0].cam_entry.lsb_mac_addr);
6620
6621         /* broadcast */
6622         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6623         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6624         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6625         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6626         if (set)
6627                 config->config_table[1].target_table_entry.flags =
6628                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6629         else
6630                 CAM_INVALIDATE(config->config_table[1]);
6631         config->config_table[1].target_table_entry.client_id = 0;
6632         config->config_table[1].target_table_entry.vlan_id = 0;
6633
6634         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6635                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6636                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6637 }
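
/* Worked example of the CAM packing above (little-endian host
 * assumed): for dev_addr 00:11:22:33:44:55, *(u16 *)&dev_addr[0]
 * reads as 0x1100, so swab16() gives msb_mac_addr = 0x0011, and
 * likewise middle = 0x2233 and lsb = 0x4455 -- the address in the
 * big-endian halfword order the CAM expects.
 */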
6638
6639 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6640 {
6641         struct mac_configuration_cmd_e1h *config =
6642                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6643
6644         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6645                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6646                 return;
6647         }
6648
6649         /* CAM allocation for E1H
6650          * unicasts: by func number
6651          * multicast: 20+FUNC*20, 20 each
6652          */
6653         config->hdr.length = 1;
6654         config->hdr.offset = BP_FUNC(bp);
6655         config->hdr.client_id = bp->fp->cl_id;
6656         config->hdr.reserved1 = 0;
6657
6658         /* primary MAC */
6659         config->config_table[0].msb_mac_addr =
6660                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6661         config->config_table[0].middle_mac_addr =
6662                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6663         config->config_table[0].lsb_mac_addr =
6664                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6665         config->config_table[0].client_id = BP_L_ID(bp);
6666         config->config_table[0].vlan_id = 0;
6667         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6668         if (set)
6669                 config->config_table[0].flags = BP_PORT(bp);
6670         else
6671                 config->config_table[0].flags =
6672                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6673
6674         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6675            (set ? "setting" : "clearing"),
6676            config->config_table[0].msb_mac_addr,
6677            config->config_table[0].middle_mac_addr,
6678            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6679
6680         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6681                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6682                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6683 }
6684
6685 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6686                              int *state_p, int poll)
6687 {
6688         /* can take a while if any port is running */
6689         int cnt = 5000;
6690
6691         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6692            poll ? "polling" : "waiting", state, idx);
6693
6694         might_sleep();
6695         while (cnt--) {
6696                 if (poll) {
6697                         bnx2x_rx_int(bp->fp, 10);
6698                         /* if index is different from 0
6699                          * the reply for some commands will
6700                          * be on the non-default queue
6701                          */
6702                         if (idx)
6703                                 bnx2x_rx_int(&bp->fp[idx], 10);
6704                 }
6705
6706                 mb(); /* state is changed by bnx2x_sp_event() */
6707                 if (*state_p == state) {
6708 #ifdef BNX2X_STOP_ON_ERROR
6709                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6710 #endif
6711                         return 0;
6712                 }
6713
6714                 msleep(1);
6715         }
6716
6717         /* timeout! */
6718         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6719                   poll ? "polling" : "waiting", state, idx);
6720 #ifdef BNX2X_STOP_ON_ERROR
6721         bnx2x_panic();
6722 #endif
6723
6724         return -EBUSY;
6725 }
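/* Note: with cnt = 5000 iterations of msleep(1), bnx2x_wait_ramrod() gives
 * a ramrod roughly five seconds (plus scheduling slack) to complete before
 * reporting -EBUSY.
 */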
6726
6727 static int bnx2x_setup_leading(struct bnx2x *bp)
6728 {
6729         int rc;
6730
6731         /* reset IGU state */
6732         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6733
6734         /* SETUP ramrod */
6735         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6736
6737         /* Wait for completion */
6738         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6739
6740         return rc;
6741 }
6742
6743 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6744 {
6745         struct bnx2x_fastpath *fp = &bp->fp[index];
6746
6747         /* reset IGU state */
6748         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6749
6750         /* SETUP ramrod */
6751         fp->state = BNX2X_FP_STATE_OPENING;
6752         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6753                       fp->cl_id, 0);
6754
6755         /* Wait for completion */
6756         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6757                                  &(fp->state), 0);
6758 }
6759
6760 static int bnx2x_poll(struct napi_struct *napi, int budget);
6761
6762 static void bnx2x_set_int_mode(struct bnx2x *bp)
6763 {
6764         int num_queues;
6765
6766         switch (int_mode) {
6767         case INT_MODE_INTx:
6768         case INT_MODE_MSI:
6769                 num_queues = 1;
6770                 bp->num_rx_queues = num_queues;
6771                 bp->num_tx_queues = num_queues;
6772                 DP(NETIF_MSG_IFUP,
6773                    "set number of queues to %d\n", num_queues);
6774                 break;
6775
6776         case INT_MODE_MSIX:
6777         default:
6778                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6779                         num_queues = min_t(u32, num_online_cpus(),
6780                                            BNX2X_MAX_QUEUES(bp));
6781                 else
6782                         num_queues = 1;
6783                 bp->num_rx_queues = num_queues;
6784                 bp->num_tx_queues = num_queues;
6785                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6786                    "  number of tx queues to %d\n",
6787                    bp->num_rx_queues, bp->num_tx_queues);
6788                 /* if we can't use MSI-X we only need one fp,
6789                  * so try to enable MSI-X with the requested number of fps
6790                  * and fall back to MSI or legacy INTx with one fp
6791                  */
6792                 if (bnx2x_enable_msix(bp)) {
6793                         /* failed to enable MSI-X */
6794                         num_queues = 1;
6795                         bp->num_rx_queues = num_queues;
6796                         bp->num_tx_queues = num_queues;
6797                         if (bp->multi_mode)
6798                                 BNX2X_ERR("Multi requested but failed to "
6799                                           "enable MSI-X  set number of "
6800                                           "queues to %d\n", num_queues);
6801                 }
6802                 break;
6803         }
6804         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6805 }
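/* e.g. (hypothetical): with multi_mode == ETH_RSS_MODE_REGULAR on a 16-CPU
 * host and BNX2X_MAX_QUEUES(bp) == 8, the MSI-X path above would set
 * num_queues = min(16, 8) = 8; INT#x and MSI always use a single queue.
 */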
6806
6807 static void bnx2x_set_rx_mode(struct net_device *dev);
6808
6809 /* must be called with rtnl_lock */
6810 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6811 {
6812         u32 load_code;
6813         int i, rc = 0;
6814 #ifdef BNX2X_STOP_ON_ERROR
6815         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6816         if (unlikely(bp->panic))
6817                 return -EPERM;
6818 #endif
6819
6820         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6821
6822         bnx2x_set_int_mode(bp);
6823
6824         if (bnx2x_alloc_mem(bp))
6825                 return -ENOMEM;
6826
6827         for_each_rx_queue(bp, i)
6828                 bnx2x_fp(bp, i, disable_tpa) =
6829                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6830
6831         for_each_rx_queue(bp, i)
6832                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6833                                bnx2x_poll, 128);
6834
6835 #ifdef BNX2X_STOP_ON_ERROR
6836         for_each_rx_queue(bp, i) {
6837                 struct bnx2x_fastpath *fp = &bp->fp[i];
6838
6839                 fp->poll_no_work = 0;
6840                 fp->poll_calls = 0;
6841                 fp->poll_max_calls = 0;
6842                 fp->poll_complete = 0;
6843                 fp->poll_exit = 0;
6844         }
6845 #endif
6846         bnx2x_napi_enable(bp);
6847
6848         if (bp->flags & USING_MSIX_FLAG) {
6849                 rc = bnx2x_req_msix_irqs(bp);
6850                 if (rc) {
6851                         pci_disable_msix(bp->pdev);
6852                         goto load_error1;
6853                 }
6854         } else {
6855                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6856                         bnx2x_enable_msi(bp);
6857                 bnx2x_ack_int(bp);
6858                 rc = bnx2x_req_irq(bp);
6859                 if (rc) {
6860                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6861                         if (bp->flags & USING_MSI_FLAG)
6862                                 pci_disable_msi(bp->pdev);
6863                         goto load_error1;
6864                 }
6865                 if (bp->flags & USING_MSI_FLAG) {
6866                         bp->dev->irq = bp->pdev->irq;
6867                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6868                                bp->dev->name, bp->pdev->irq);
6869                 }
6870         }
6871
6872         /* Send LOAD_REQUEST command to the MCP.
6873            The response indicates the type of LOAD command:
6874            if this is the first port to be initialized,
6875            the common blocks must be initialized as well; otherwise not.
6876         */
6877         if (!BP_NOMCP(bp)) {
6878                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6879                 if (!load_code) {
6880                         BNX2X_ERR("MCP response failure, aborting\n");
6881                         rc = -EBUSY;
6882                         goto load_error2;
6883                 }
6884                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6885                         rc = -EBUSY; /* other port in diagnostic mode */
6886                         goto load_error2;
6887                 }
6888
6889         } else {
6890                 int port = BP_PORT(bp);
6891
6892                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
6893                    load_count[0], load_count[1], load_count[2]);
6894                 load_count[0]++;
6895                 load_count[1 + port]++;
6896                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
6897                    load_count[0], load_count[1], load_count[2]);
6898                 if (load_count[0] == 1)
6899                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6900                 else if (load_count[1 + port] == 1)
6901                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6902                 else
6903                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6904         }
6905
6906         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6907             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6908                 bp->port.pmf = 1;
6909         else
6910                 bp->port.pmf = 0;
6911         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6912
6913         /* Initialize HW */
6914         rc = bnx2x_init_hw(bp, load_code);
6915         if (rc) {
6916                 BNX2X_ERR("HW init failed, aborting\n");
6917                 goto load_error2;
6918         }
6919
6920         /* Setup NIC internals and enable interrupts */
6921         bnx2x_nic_init(bp, load_code);
6922
6923         /* Send LOAD_DONE command to MCP */
6924         if (!BP_NOMCP(bp)) {
6925                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6926                 if (!load_code) {
6927                         BNX2X_ERR("MCP response failure, aborting\n");
6928                         rc = -EBUSY;
6929                         goto load_error3;
6930                 }
6931         }
6932
6933         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6934
6935         rc = bnx2x_setup_leading(bp);
6936         if (rc) {
6937                 BNX2X_ERR("Setup leading failed!\n");
6938                 goto load_error3;
6939         }
6940
6941         if (CHIP_IS_E1H(bp))
6942                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6943                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6944                         bp->state = BNX2X_STATE_DISABLED;
6945                 }
6946
6947         if (bp->state == BNX2X_STATE_OPEN)
6948                 for_each_nondefault_queue(bp, i) {
6949                         rc = bnx2x_setup_multi(bp, i);
6950                         if (rc)
6951                                 goto load_error3;
6952                 }
6953
6954         if (CHIP_IS_E1(bp))
6955                 bnx2x_set_mac_addr_e1(bp, 1);
6956         else
6957                 bnx2x_set_mac_addr_e1h(bp, 1);
6958
6959         if (bp->port.pmf)
6960                 bnx2x_initial_phy_init(bp, load_mode);
6961
6962         /* Start fast path */
6963         switch (load_mode) {
6964         case LOAD_NORMAL:
6965                 /* Tx queues only need to be re-enabled */
6966                 netif_tx_wake_all_queues(bp->dev);
6967                 /* Initialize the receive filter. */
6968                 bnx2x_set_rx_mode(bp->dev);
6969                 break;
6970
6971         case LOAD_OPEN:
6972                 netif_tx_start_all_queues(bp->dev);
6973                 /* Initialize the receive filter. */
6974                 bnx2x_set_rx_mode(bp->dev);
6975                 break;
6976
6977         case LOAD_DIAG:
6978                 /* Initialize the receive filter. */
6979                 bnx2x_set_rx_mode(bp->dev);
6980                 bp->state = BNX2X_STATE_DIAG;
6981                 break;
6982
6983         default:
6984                 break;
6985         }
6986
6987         if (!bp->port.pmf)
6988                 bnx2x__link_status_update(bp);
6989
6990         /* start the timer */
6991         mod_timer(&bp->timer, jiffies + bp->current_interval);
6992
6993
6994         return 0;
6995
6996 load_error3:
6997         bnx2x_int_disable_sync(bp, 1);
6998         if (!BP_NOMCP(bp)) {
6999                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7000                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7001         }
7002         bp->port.pmf = 0;
7003         /* Free SKBs, SGEs, TPA pool and driver internals */
7004         bnx2x_free_skbs(bp);
7005         for_each_rx_queue(bp, i)
7006                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7007 load_error2:
7008         /* Release IRQs */
7009         bnx2x_free_irq(bp);
7010 load_error1:
7011         bnx2x_napi_disable(bp);
7012         for_each_rx_queue(bp, i)
7013                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7014         bnx2x_free_mem(bp);
7015
7016         return rc;
7017 }
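/* Note: the load_error labels above unwind in reverse order of
 * initialization: load_error3 undoes HW init and MCP state, load_error2
 * releases IRQs, and load_error1 tears down NAPI and frees memory.
 */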
7018
7019 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7020 {
7021         struct bnx2x_fastpath *fp = &bp->fp[index];
7022         int rc;
7023
7024         /* halt the connection */
7025         fp->state = BNX2X_FP_STATE_HALTING;
7026         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7027
7028         /* Wait for completion */
7029         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7030                                &(fp->state), 1);
7031         if (rc) /* timeout */
7032                 return rc;
7033
7034         /* delete cfc entry */
7035         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7036
7037         /* Wait for completion */
7038         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7039                                &(fp->state), 1);
7040         return rc;
7041 }
7042
7043 static int bnx2x_stop_leading(struct bnx2x *bp)
7044 {
7045         __le16 dsb_sp_prod_idx;
7046         /* if the other port is handling traffic,
7047            this can take a lot of time */
7048         int cnt = 500;
7049         int rc;
7050
7051         might_sleep();
7052
7053         /* Send HALT ramrod */
7054         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7055         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7056
7057         /* Wait for completion */
7058         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7059                                &(bp->fp[0].state), 1);
7060         if (rc) /* timeout */
7061                 return rc;
7062
7063         dsb_sp_prod_idx = *bp->dsb_sp_prod;
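        /* this snapshot of the default status block's slowpath producer
         * lets us detect the PORT_DEL completion below when it advances
         */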
7064
7065         /* Send PORT_DELETE ramrod */
7066         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7067
7068         /* Wait for the completion to arrive on the default status block.
7069            We are going to reset the chip anyway,
7070            so there is not much to do if this times out.
7071          */
7072         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7073                 if (!cnt) {
7074                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7075                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7076                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7077 #ifdef BNX2X_STOP_ON_ERROR
7078                         bnx2x_panic();
7079 #endif
7080                         rc = -EBUSY;
7081                         break;
7082                 }
7083                 cnt--;
7084                 msleep(1);
7085                 rmb(); /* Refresh the dsb_sp_prod */
7086         }
7087         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7088         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7089
7090         return rc;
7091 }
7092
7093 static void bnx2x_reset_func(struct bnx2x *bp)
7094 {
7095         int port = BP_PORT(bp);
7096         int func = BP_FUNC(bp);
7097         int base, i;
7098
7099         /* Configure IGU */
7100         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7101         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7102
7103         /* Clear ILT */
7104         base = FUNC_ILT_BASE(func);
7105         for (i = base; i < base + ILT_PER_FUNC; i++)
7106                 bnx2x_ilt_wr(bp, i, 0);
7107 }
7108
7109 static void bnx2x_reset_port(struct bnx2x *bp)
7110 {
7111         int port = BP_PORT(bp);
7112         u32 val;
7113
7114         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7115
7116         /* Do not rcv packets to BRB */
7117         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7118         /* Do not direct rcv packets that are not for MCP to the BRB */
7119         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7120                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7121
7122         /* Configure AEU */
7123         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7124
7125         msleep(100);
7126         /* Check for BRB port occupancy */
7127         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7128         if (val)
7129                 DP(NETIF_MSG_IFDOWN,
7130                    "BRB1 is not empty  %d blocks are occupied\n", val);
7131
7132         /* TODO: Close Doorbell port? */
7133 }
7134
7135 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7136 {
7137         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7138            BP_FUNC(bp), reset_code);
7139
7140         switch (reset_code) {
7141         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7142                 bnx2x_reset_port(bp);
7143                 bnx2x_reset_func(bp);
7144                 bnx2x_reset_common(bp);
7145                 break;
7146
7147         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7148                 bnx2x_reset_port(bp);
7149                 bnx2x_reset_func(bp);
7150                 break;
7151
7152         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7153                 bnx2x_reset_func(bp);
7154                 break;
7155
7156         default:
7157                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7158                 break;
7159         }
7160 }
7161
7162 /* must be called with rtnl_lock */
7163 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7164 {
7165         int port = BP_PORT(bp);
7166         u32 reset_code = 0;
7167         int i, cnt, rc;
7168
7169         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7170
7171         bp->rx_mode = BNX2X_RX_MODE_NONE;
7172         bnx2x_set_storm_rx_mode(bp);
7173
7174         bnx2x_netif_stop(bp, 1);
7175
7176         del_timer_sync(&bp->timer);
7177         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7178                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7179         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7180
7181         /* Release IRQs */
7182         bnx2x_free_irq(bp);
7183
7184         /* Wait until tx fastpath tasks complete */
7185         for_each_tx_queue(bp, i) {
7186                 struct bnx2x_fastpath *fp = &bp->fp[i];
7187
7188                 cnt = 1000;
7189                 while (bnx2x_has_tx_work_unload(fp)) {
7190
7191                         bnx2x_tx_int(fp);
7192                         if (!cnt) {
7193                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7194                                           i);
7195 #ifdef BNX2X_STOP_ON_ERROR
7196                                 bnx2x_panic();
7197                                 return -EBUSY;
7198 #else
7199                                 break;
7200 #endif
7201                         }
7202                         cnt--;
7203                         msleep(1);
7204                 }
7205         }
7206         /* Give HW time to discard old tx messages */
7207         msleep(1);
7208
7209         if (CHIP_IS_E1(bp)) {
7210                 struct mac_configuration_cmd *config =
7211                                                 bnx2x_sp(bp, mcast_config);
7212
7213                 bnx2x_set_mac_addr_e1(bp, 0);
7214
7215                 for (i = 0; i < config->hdr.length; i++)
7216                         CAM_INVALIDATE(config->config_table[i]);
7217
7218                 config->hdr.length = i;
7219                 if (CHIP_REV_IS_SLOW(bp))
7220                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7221                 else
7222                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7223                 config->hdr.client_id = bp->fp->cl_id;
7224                 config->hdr.reserved1 = 0;
7225
7226                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7227                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7228                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7229
7230         } else { /* E1H */
7231                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7232
7233                 bnx2x_set_mac_addr_e1h(bp, 0);
7234
7235                 for (i = 0; i < MC_HASH_SIZE; i++)
7236                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7237         }
7238
7239         if (unload_mode == UNLOAD_NORMAL)
7240                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7241
7242         else if (bp->flags & NO_WOL_FLAG) {
7243                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7244                 if (CHIP_IS_E1H(bp))
7245                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7246
7247         } else if (bp->wol) {
7248                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7249                 u8 *mac_addr = bp->dev->dev_addr;
7250                 u32 val;
7251                 /* The MAC address is written to entries 1-4 to
7252                    preserve entry 0, which is used by the PMF */
7253                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7254
7255                 val = (mac_addr[0] << 8) | mac_addr[1];
7256                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7257
7258                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7259                       (mac_addr[4] << 8) | mac_addr[5];
7260                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
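                /* e.g. (hypothetical address aa:bb:cc:dd:ee:ff): the first
                 * write above stores 0xaabb, the second 0xccddeeff,
                 * matching the byte split in the two val computations.
                 */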
7261
7262                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7263
7264         } else
7265                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7266
7267         /* Close multi and leading connections.
7268            Completions for the ramrods are collected synchronously */
7269         for_each_nondefault_queue(bp, i)
7270                 if (bnx2x_stop_multi(bp, i))
7271                         goto unload_error;
7272
7273         rc = bnx2x_stop_leading(bp);
7274         if (rc) {
7275                 BNX2X_ERR("Stop leading failed!\n");
7276 #ifdef BNX2X_STOP_ON_ERROR
7277                 return -EBUSY;
7278 #else
7279                 goto unload_error;
7280 #endif
7281         }
7282
7283 unload_error:
7284         if (!BP_NOMCP(bp))
7285                 reset_code = bnx2x_fw_command(bp, reset_code);
7286         else {
7287                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7288                    load_count[0], load_count[1], load_count[2]);
7289                 load_count[0]--;
7290                 load_count[1 + port]--;
7291                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7292                    load_count[0], load_count[1], load_count[2]);
7293                 if (load_count[0] == 0)
7294                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7295                 else if (load_count[1 + port] == 0)
7296                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7297                 else
7298                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7299         }
7300
7301         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7302             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7303                 bnx2x__link_reset(bp);
7304
7305         /* Reset the chip */
7306         bnx2x_reset_chip(bp, reset_code);
7307
7308         /* Report UNLOAD_DONE to MCP */
7309         if (!BP_NOMCP(bp))
7310                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7311
7312         bp->port.pmf = 0;
7313
7314         /* Free SKBs, SGEs, TPA pool and driver internals */
7315         bnx2x_free_skbs(bp);
7316         for_each_rx_queue(bp, i)
7317                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7318         for_each_rx_queue(bp, i)
7319                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7320         bnx2x_free_mem(bp);
7321
7322         bp->state = BNX2X_STATE_CLOSED;
7323
7324         netif_carrier_off(bp->dev);
7325
7326         return 0;
7327 }
7328
7329 static void bnx2x_reset_task(struct work_struct *work)
7330 {
7331         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7332
7333 #ifdef BNX2X_STOP_ON_ERROR
7334         BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
7335                   " so the reset is skipped to allow a debug dump;\n"
7336          KERN_ERR " you will need to reboot when done\n");
7337         return;
7338 #endif
7339
7340         rtnl_lock();
7341
7342         if (!netif_running(bp->dev))
7343                 goto reset_task_exit;
7344
7345         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7346         bnx2x_nic_load(bp, LOAD_NORMAL);
7347
7348 reset_task_exit:
7349         rtnl_unlock();
7350 }
7351
7352 /* end of nic load/unload */
7353
7354 /* ethtool_ops */
7355
7356 /*
7357  * Init service functions
7358  */
7359
7360 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7361 {
7362         switch (func) {
7363         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7364         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7365         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7366         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7367         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7368         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7369         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7370         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7371         default:
7372                 BNX2X_ERR("Unsupported function index: %d\n", func);
7373                 return (u32)(-1);
7374         }
7375 }
7376
7377 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7378 {
7379         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7380
7381         /* Flush all outstanding writes */
7382         mmiowb();
7383
7384         /* Pretend to be function 0 */
7385         REG_WR(bp, reg, 0);
7386         /* Flush the GRC transaction (in the chip) */
7387         new_val = REG_RD(bp, reg);
7388         if (new_val != 0) {
7389                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7390                           new_val);
7391                 BUG();
7392         }
7393
7394         /* From now on we are in "like-E1" mode */
7395         bnx2x_int_disable(bp);
7396
7397         /* Flush all outstanding writes */
7398         mmiowb();
7399
7400         /* Restore the original function settings */
7401         REG_WR(bp, reg, orig_func);
7402         new_val = REG_RD(bp, reg);
7403         if (new_val != orig_func) {
7404                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7405                           orig_func, new_val);
7406                 BUG();
7407         }
7408 }
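/* Note (assumed PXP2 "pretend" semantics): while the pretend register is 0,
 * GRC accesses issued by this function are presented to the chip as if they
 * came from function 0, which is what lets us disable interrupts "like E1"
 * above before restoring the original function.
 */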
7409
7410 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7411 {
7412         if (CHIP_IS_E1H(bp))
7413                 bnx2x_undi_int_disable_e1h(bp, func);
7414         else
7415                 bnx2x_int_disable(bp);
7416 }
7417
7418 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7419 {
7420         u32 val;
7421
7422         /* Check if there is any driver already loaded */
7423         val = REG_RD(bp, MISC_REG_UNPREPARED);
7424         if (val == 0x1) {
7425                 /* Check if it is the UNDI driver:
7426                  * UNDI initializes the CID offset for the normal doorbell to 0x7
7427                  */
7428                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7429                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7430                 if (val == 0x7) {
7431                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7432                         /* save our func */
7433                         int func = BP_FUNC(bp);
7434                         u32 swap_en;
7435                         u32 swap_val;
7436
7437                         /* clear the UNDI indication */
7438                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7439
7440                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7441
7442                         /* try to unload UNDI on port 0 */
7443                         bp->func = 0;
7444                         bp->fw_seq =
7445                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7446                                 DRV_MSG_SEQ_NUMBER_MASK);
7447                         reset_code = bnx2x_fw_command(bp, reset_code);
7448
7449                         /* if UNDI is loaded on the other port */
7450                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7451
7452                                 /* send "DONE" for previous unload */
7453                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7454
7455                                 /* unload UNDI on port 1 */
7456                                 bp->func = 1;
7457                                 bp->fw_seq =
7458                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7459                                         DRV_MSG_SEQ_NUMBER_MASK);
7460                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7461
7462                                 bnx2x_fw_command(bp, reset_code);
7463                         }
7464
7465                         /* now it's safe to release the lock */
7466                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7467
7468                         bnx2x_undi_int_disable(bp, func);
7469
7470                         /* close input traffic and wait for it to stop */
7471                         /* Do not rcv packets to BRB */
7472                         REG_WR(bp,
7473                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7474                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7475                         /* Do not direct rcv packets that are not for MCP to
7476                          * the BRB */
7477                         REG_WR(bp,
7478                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7479                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7480                         /* clear AEU */
7481                         REG_WR(bp,
7482                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7483                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7484                         msleep(10);
7485
7486                         /* save NIG port swap info */
7487                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7488                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7489                         /* reset device */
7490                         REG_WR(bp,
7491                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7492                                0xd3ffffff);
7493                         REG_WR(bp,
7494                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7495                                0x1403);
7496                         /* take the NIG out of reset and restore swap values */
7497                         REG_WR(bp,
7498                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7499                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7500                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7501                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7502
7503                         /* send unload done to the MCP */
7504                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7505
7506                         /* restore our func and fw_seq */
7507                         bp->func = func;
7508                         bp->fw_seq =
7509                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7510                                 DRV_MSG_SEQ_NUMBER_MASK);
7511
7512                 } else
7513                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7514         }
7515 }
7516
7517 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7518 {
7519         u32 val, val2, val3, val4, id;
7520         u16 pmc;
7521
7522         /* Get the chip revision id and number. */
7523         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7524         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7525         id = ((val & 0xffff) << 16);
7526         val = REG_RD(bp, MISC_REG_CHIP_REV);
7527         id |= ((val & 0xf) << 12);
7528         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7529         id |= ((val & 0xff) << 4);
7530         val = REG_RD(bp, MISC_REG_BOND_ID);
7531         id |= (val & 0xf);
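        /* e.g. (hypothetical values): num 0x164e, rev 0x1, metal 0x00 and
         * bond_id 0x0 assemble to
         * id = 0x164e0000 | 0x1000 | 0x00 | 0x0 = 0x164e1000
         */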
7532         bp->common.chip_id = id;
7533         bp->link_params.chip_id = bp->common.chip_id;
7534         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7535
7536         val = (REG_RD(bp, 0x2874) & 0x55);
7537         if ((bp->common.chip_id & 0x1) ||
7538             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7539                 bp->flags |= ONE_PORT_FLAG;
7540                 BNX2X_DEV_INFO("single port device\n");
7541         }
7542
7543         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7544         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7545                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7546         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7547                        bp->common.flash_size, bp->common.flash_size);
7548
7549         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7550         bp->link_params.shmem_base = bp->common.shmem_base;
7551         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7552
7553         if (!bp->common.shmem_base ||
7554             (bp->common.shmem_base < 0xA0000) ||
7555             (bp->common.shmem_base >= 0xC0000)) {
7556                 BNX2X_DEV_INFO("MCP not active\n");
7557                 bp->flags |= NO_MCP_FLAG;
7558                 return;
7559         }
7560
7561         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7562         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7563                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7564                 BNX2X_ERR("BAD MCP validity signature\n");
7565
7566         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7567         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7568
7569         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7570                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7571                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7572
7573         bp->link_params.feature_config_flags = 0;
7574         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7575         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7576                 bp->link_params.feature_config_flags |=
7577                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7578         else
7579                 bp->link_params.feature_config_flags &=
7580                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7581
7582         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7583         bp->common.bc_ver = val;
7584         BNX2X_DEV_INFO("bc_ver %X\n", val);
7585         if (val < BNX2X_BC_VER) {
7586                 /* for now only warn
7587                  * later we might need to enforce this */
7588                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7589                           " please upgrade BC\n", BNX2X_BC_VER, val);
7590         }
7591
7592         if (BP_E1HVN(bp) == 0) {
7593                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7594                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7595         } else {
7596                 /* no WOL capability for E1HVN != 0 */
7597                 bp->flags |= NO_WOL_FLAG;
7598         }
7599         BNX2X_DEV_INFO("%sWoL capable\n",
7600                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7601
7602         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7603         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7604         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7605         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7606
7607         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7608                val, val2, val3, val4);
7609 }
7610
7611 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7612                                                     u32 switch_cfg)
7613 {
7614         int port = BP_PORT(bp);
7615         u32 ext_phy_type;
7616
7617         switch (switch_cfg) {
7618         case SWITCH_CFG_1G:
7619                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7620
7621                 ext_phy_type =
7622                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7623                 switch (ext_phy_type) {
7624                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7625                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7626                                        ext_phy_type);
7627
7628                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7629                                                SUPPORTED_10baseT_Full |
7630                                                SUPPORTED_100baseT_Half |
7631                                                SUPPORTED_100baseT_Full |
7632                                                SUPPORTED_1000baseT_Full |
7633                                                SUPPORTED_2500baseX_Full |
7634                                                SUPPORTED_TP |
7635                                                SUPPORTED_FIBRE |
7636                                                SUPPORTED_Autoneg |
7637                                                SUPPORTED_Pause |
7638                                                SUPPORTED_Asym_Pause);
7639                         break;
7640
7641                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7642                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7643                                        ext_phy_type);
7644
7645                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7646                                                SUPPORTED_10baseT_Full |
7647                                                SUPPORTED_100baseT_Half |
7648                                                SUPPORTED_100baseT_Full |
7649                                                SUPPORTED_1000baseT_Full |
7650                                                SUPPORTED_TP |
7651                                                SUPPORTED_FIBRE |
7652                                                SUPPORTED_Autoneg |
7653                                                SUPPORTED_Pause |
7654                                                SUPPORTED_Asym_Pause);
7655                         break;
7656
7657                 default:
7658                         BNX2X_ERR("NVRAM config error. "
7659                                   "BAD SerDes ext_phy_config 0x%x\n",
7660                                   bp->link_params.ext_phy_config);
7661                         return;
7662                 }
7663
7664                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7665                                            port*0x10);
7666                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7667                 break;
7668
7669         case SWITCH_CFG_10G:
7670                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7671
7672                 ext_phy_type =
7673                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7674                 switch (ext_phy_type) {
7675                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7676                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7677                                        ext_phy_type);
7678
7679                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7680                                                SUPPORTED_10baseT_Full |
7681                                                SUPPORTED_100baseT_Half |
7682                                                SUPPORTED_100baseT_Full |
7683                                                SUPPORTED_1000baseT_Full |
7684                                                SUPPORTED_2500baseX_Full |
7685                                                SUPPORTED_10000baseT_Full |
7686                                                SUPPORTED_TP |
7687                                                SUPPORTED_FIBRE |
7688                                                SUPPORTED_Autoneg |
7689                                                SUPPORTED_Pause |
7690                                                SUPPORTED_Asym_Pause);
7691                         break;
7692
7693                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7694                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7695                                        ext_phy_type);
7696
7697                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7698                                                SUPPORTED_1000baseT_Full |
7699                                                SUPPORTED_FIBRE |
7700                                                SUPPORTED_Autoneg |
7701                                                SUPPORTED_Pause |
7702                                                SUPPORTED_Asym_Pause);
7703                         break;
7704
7705                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7706                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7707                                        ext_phy_type);
7708
7709                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7710                                                SUPPORTED_2500baseX_Full |
7711                                                SUPPORTED_1000baseT_Full |
7712                                                SUPPORTED_FIBRE |
7713                                                SUPPORTED_Autoneg |
7714                                                SUPPORTED_Pause |
7715                                                SUPPORTED_Asym_Pause);
7716                         break;
7717
7718                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7719                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7720                                        ext_phy_type);
7721
7722                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7723                                                SUPPORTED_FIBRE |
7724                                                SUPPORTED_Pause |
7725                                                SUPPORTED_Asym_Pause);
7726                         break;
7727
7728                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7729                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7730                                        ext_phy_type);
7731
7732                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7733                                                SUPPORTED_1000baseT_Full |
7734                                                SUPPORTED_FIBRE |
7735                                                SUPPORTED_Pause |
7736                                                SUPPORTED_Asym_Pause);
7737                         break;
7738
7739                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7740                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7741                                        ext_phy_type);
7742
7743                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7744                                                SUPPORTED_1000baseT_Full |
7745                                                SUPPORTED_Autoneg |
7746                                                SUPPORTED_FIBRE |
7747                                                SUPPORTED_Pause |
7748                                                SUPPORTED_Asym_Pause);
7749                         break;
7750
7751                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7752                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7753                                        ext_phy_type);
7754
7755                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7756                                                SUPPORTED_TP |
7757                                                SUPPORTED_Autoneg |
7758                                                SUPPORTED_Pause |
7759                                                SUPPORTED_Asym_Pause);
7760                         break;
7761
7762                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7763                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7764                                        ext_phy_type);
7765
7766                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7767                                                SUPPORTED_10baseT_Full |
7768                                                SUPPORTED_100baseT_Half |
7769                                                SUPPORTED_100baseT_Full |
7770                                                SUPPORTED_1000baseT_Full |
7771                                                SUPPORTED_10000baseT_Full |
7772                                                SUPPORTED_TP |
7773                                                SUPPORTED_Autoneg |
7774                                                SUPPORTED_Pause |
7775                                                SUPPORTED_Asym_Pause);
7776                         break;
7777
7778                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7779                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7780                                   bp->link_params.ext_phy_config);
7781                         break;
7782
7783                 default:
7784                         BNX2X_ERR("NVRAM config error. "
7785                                   "BAD XGXS ext_phy_config 0x%x\n",
7786                                   bp->link_params.ext_phy_config);
7787                         return;
7788                 }
7789
7790                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7791                                            port*0x18);
7792                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7793
7794                 break;
7795
7796         default:
7797                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7798                           bp->port.link_config);
7799                 return;
7800         }
7801         bp->link_params.phy_addr = bp->port.phy_addr;
7802
7803         /* mask what we support according to speed_cap_mask */
7804         if (!(bp->link_params.speed_cap_mask &
7805                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7806                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7807
7808         if (!(bp->link_params.speed_cap_mask &
7809                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7810                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7811
7812         if (!(bp->link_params.speed_cap_mask &
7813                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7814                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7815
7816         if (!(bp->link_params.speed_cap_mask &
7817                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7818                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7819
7820         if (!(bp->link_params.speed_cap_mask &
7821                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7822                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7823                                         SUPPORTED_1000baseT_Full);
7824
7825         if (!(bp->link_params.speed_cap_mask &
7826                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7827                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7828
7829         if (!(bp->link_params.speed_cap_mask &
7830                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7831                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7832
7833         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7834 }
7835
7836 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7837 {
7838         bp->link_params.req_duplex = DUPLEX_FULL;
7839
7840         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7841         case PORT_FEATURE_LINK_SPEED_AUTO:
7842                 if (bp->port.supported & SUPPORTED_Autoneg) {
7843                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7844                         bp->port.advertising = bp->port.supported;
7845                 } else {
7846                         u32 ext_phy_type =
7847                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7848
7849                         if ((ext_phy_type ==
7850                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7851                             (ext_phy_type ==
7852                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7853                                 /* force 10G, no AN */
7854                                 bp->link_params.req_line_speed = SPEED_10000;
7855                                 bp->port.advertising =
7856                                                 (ADVERTISED_10000baseT_Full |
7857                                                  ADVERTISED_FIBRE);
7858                                 break;
7859                         }
7860                         BNX2X_ERR("NVRAM config error. "
7861                                   "Invalid link_config 0x%x"
7862                                   "  Autoneg not supported\n",
7863                                   bp->port.link_config);
7864                         return;
7865                 }
7866                 break;
7867
7868         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7869                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7870                         bp->link_params.req_line_speed = SPEED_10;
7871                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7872                                                 ADVERTISED_TP);
7873                 } else {
7874                         BNX2X_ERR("NVRAM config error. "
7875                                   "Invalid link_config 0x%x"
7876                                   "  speed_cap_mask 0x%x\n",
7877                                   bp->port.link_config,
7878                                   bp->link_params.speed_cap_mask);
7879                         return;
7880                 }
7881                 break;
7882
7883         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7884                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7885                         bp->link_params.req_line_speed = SPEED_10;
7886                         bp->link_params.req_duplex = DUPLEX_HALF;
7887                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7888                                                 ADVERTISED_TP);
7889                 } else {
7890                         BNX2X_ERR("NVRAM config error. "
7891                                   "Invalid link_config 0x%x"
7892                                   "  speed_cap_mask 0x%x\n",
7893                                   bp->port.link_config,
7894                                   bp->link_params.speed_cap_mask);
7895                         return;
7896                 }
7897                 break;
7898
7899         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7900                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7901                         bp->link_params.req_line_speed = SPEED_100;
7902                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7903                                                 ADVERTISED_TP);
7904                 } else {
7905                         BNX2X_ERR("NVRAM config error. "
7906                                   "Invalid link_config 0x%x"
7907                                   "  speed_cap_mask 0x%x\n",
7908                                   bp->port.link_config,
7909                                   bp->link_params.speed_cap_mask);
7910                         return;
7911                 }
7912                 break;
7913
7914         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7915                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7916                         bp->link_params.req_line_speed = SPEED_100;
7917                         bp->link_params.req_duplex = DUPLEX_HALF;
7918                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7919                                                 ADVERTISED_TP);
7920                 } else {
7921                         BNX2X_ERR("NVRAM config error. "
7922                                   "Invalid link_config 0x%x"
7923                                   "  speed_cap_mask 0x%x\n",
7924                                   bp->port.link_config,
7925                                   bp->link_params.speed_cap_mask);
7926                         return;
7927                 }
7928                 break;
7929
7930         case PORT_FEATURE_LINK_SPEED_1G:
7931                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7932                         bp->link_params.req_line_speed = SPEED_1000;
7933                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7934                                                 ADVERTISED_TP);
7935                 } else {
7936                         BNX2X_ERR("NVRAM config error. "
7937                                   "Invalid link_config 0x%x"
7938                                   "  speed_cap_mask 0x%x\n",
7939                                   bp->port.link_config,
7940                                   bp->link_params.speed_cap_mask);
7941                         return;
7942                 }
7943                 break;
7944
7945         case PORT_FEATURE_LINK_SPEED_2_5G:
7946                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7947                         bp->link_params.req_line_speed = SPEED_2500;
7948                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7949                                                 ADVERTISED_TP);
7950                 } else {
7951                         BNX2X_ERR("NVRAM config error. "
7952                                   "Invalid link_config 0x%x"
7953                                   "  speed_cap_mask 0x%x\n",
7954                                   bp->port.link_config,
7955                                   bp->link_params.speed_cap_mask);
7956                         return;
7957                 }
7958                 break;
7959
7960         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7961         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7962         case PORT_FEATURE_LINK_SPEED_10G_KR:
7963                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7964                         bp->link_params.req_line_speed = SPEED_10000;
7965                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7966                                                 ADVERTISED_FIBRE);
7967                 } else {
7968                         BNX2X_ERR("NVRAM config error. "
7969                                   "Invalid link_config 0x%x"
7970                                   "  speed_cap_mask 0x%x\n",
7971                                   bp->port.link_config,
7972                                   bp->link_params.speed_cap_mask);
7973                         return;
7974                 }
7975                 break;
7976
7977         default:
7978                 BNX2X_ERR("NVRAM config error. "
7979                           "BAD link speed link_config 0x%x\n",
7980                           bp->port.link_config);
7981                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7982                 bp->port.advertising = bp->port.supported;
7983                 break;
7984         }
7985
7986         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7987                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7988         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7989             !(bp->port.supported & SUPPORTED_Autoneg))
7990                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7991
7992         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7993                        "  advertising 0x%x\n",
7994                        bp->link_params.req_line_speed,
7995                        bp->link_params.req_duplex,
7996                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7997 }
7998
7999 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8000 {
8001         int port = BP_PORT(bp);
8002         u32 val, val2;
8003         u32 config;
8004         u16 i;
8005
8006         bp->link_params.bp = bp;
8007         bp->link_params.port = port;
8008
8009         bp->link_params.lane_config =
8010                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8011         bp->link_params.ext_phy_config =
8012                 SHMEM_RD(bp,
8013                          dev_info.port_hw_config[port].external_phy_config);
8014         bp->link_params.speed_cap_mask =
8015                 SHMEM_RD(bp,
8016                          dev_info.port_hw_config[port].speed_capability_mask);
8017
8018         bp->port.link_config =
8019                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8020
8021         /* Get the rx and tx xgxs config for each of the 4 lanes */
8022         for (i = 0; i < 2; i++) {
8023                 val = SHMEM_RD(bp,
8024                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8025                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8026                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8027
8028                 val = SHMEM_RD(bp,
8029                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8030                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8031                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8032         }
8033
8034         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8035         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8036                 bp->link_params.feature_config_flags |=
8037                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8038         else
8039                 bp->link_params.feature_config_flags &=
8040                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8041
8042         /* If the device is capable of WoL, set the default state according
8043          * to the HW
8044          */
8045         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8046                    (config & PORT_FEATURE_WOL_ENABLED));
8047
8048         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8049                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8050                        bp->link_params.lane_config,
8051                        bp->link_params.ext_phy_config,
8052                        bp->link_params.speed_cap_mask, bp->port.link_config);
8053
8054         bp->link_params.switch_cfg = (bp->port.link_config &
8055                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8056         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8057
8058         bnx2x_link_settings_requested(bp);
8059
8060         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8061         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8062         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8063         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8064         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8065         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8066         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8067         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8068         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8069         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8070 }
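
/* Illustration of the MAC assembly above (values are made up, not read
 * from any board): with mac_upper = 0x0000001a and mac_lower = 0x64c2dead
 * in shmem, the byte extraction yields dev_addr 00:1a:64:c2:de:ad --
 * mac_upper supplies the two most significant octets and mac_lower the
 * remaining four.
 */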
8071
8072 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8073 {
8074         int func = BP_FUNC(bp);
8075         u32 val, val2;
8076         int rc = 0;
8077
8078         bnx2x_get_common_hwinfo(bp);
8079
8080         bp->e1hov = 0;
8081         bp->e1hmf = 0;
8082         if (CHIP_IS_E1H(bp)) {
8083                 bp->mf_config =
8084                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8085
8086                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8087                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8088                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8089
8090                         bp->e1hov = val;
8091                         bp->e1hmf = 1;
8092                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8093                                        "(0x%04x)\n",
8094                                        func, bp->e1hov, bp->e1hov);
8095                 } else {
8096                         BNX2X_DEV_INFO("single function mode\n");
8097                         if (BP_E1HVN(bp)) {
8098                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8099                                           "  aborting\n", func);
8100                                 rc = -EPERM;
8101                         }
8102                 }
8103         }
8104
8105         if (!BP_NOMCP(bp)) {
8106                 bnx2x_get_port_hwinfo(bp);
8107
8108                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8109                               DRV_MSG_SEQ_NUMBER_MASK);
8110                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8111         }
8112
8113         if (IS_E1HMF(bp)) {
8114                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8115                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8116                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8117                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8118                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8119                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8120                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8121                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8122                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8123                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8124                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8125                                ETH_ALEN);
8126                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8127                                ETH_ALEN);
8128                 }
8129
8130                 return rc;
8131         }
8132
8133         if (BP_NOMCP(bp)) {
8134                 /* only supposed to happen on emulation/FPGA */
8135                 BNX2X_ERR("warning: random MAC workaround active\n");
8136                 random_ether_addr(bp->dev->dev_addr);
8137                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8138         }
8139
8140         return rc;
8141 }
8142
8143 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8144 {
8145         int func = BP_FUNC(bp);
8146         int timer_interval;
8147         int rc;
8148
8149         /* Disable interrupt handling until HW is initialized */
8150         atomic_set(&bp->intr_sem, 1);
8151
8152         mutex_init(&bp->port.phy_mutex);
8153
8154         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8155         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8156
8157         rc = bnx2x_get_hwinfo(bp);
8158
8159         /* need to reset chip if undi was active */
8160         if (!BP_NOMCP(bp))
8161                 bnx2x_undi_unload(bp);
8162
8163         if (CHIP_REV_IS_FPGA(bp))
8164                 printk(KERN_ERR PFX "FPGA detected\n");
8165
8166         if (BP_NOMCP(bp) && (func == 0))
8167                 printk(KERN_ERR PFX
8168                        "MCP disabled, must load devices in order!\n");
8169
8170         /* Set multi queue mode */
8171         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8172             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8173                 printk(KERN_ERR PFX
8174                       "Multi disabled since int_mode requested is not MSI-X\n");
8175                 multi_mode = ETH_RSS_MODE_DISABLED;
8176         }
8177         bp->multi_mode = multi_mode;
8178
8179
8180         /* Set TPA flags */
8181         if (disable_tpa) {
8182                 bp->flags &= ~TPA_ENABLE_FLAG;
8183                 bp->dev->features &= ~NETIF_F_LRO;
8184         } else {
8185                 bp->flags |= TPA_ENABLE_FLAG;
8186                 bp->dev->features |= NETIF_F_LRO;
8187         }
8188
8189         bp->mrrs = mrrs;
8190
8191         bp->tx_ring_size = MAX_TX_AVAIL;
8192         bp->rx_ring_size = MAX_RX_AVAIL;
8193
8194         bp->rx_csum = 1;
8195
8196         bp->tx_ticks = 50;
8197         bp->rx_ticks = 25;
8198
8199         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8200         bp->current_interval = (poll ? poll : timer_interval);
8201
8202         init_timer(&bp->timer);
8203         bp->timer.expires = jiffies + bp->current_interval;
8204         bp->timer.data = (unsigned long) bp;
8205         bp->timer.function = bnx2x_timer;
8206
8207         return rc;
8208 }
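
/* Note on the timer setup above: current_interval is the periodic-task
 * interval in jiffies -- one second normally, five on slow emulation/FPGA
 * chips -- unless the "poll" module parameter overrides it; e.g. loading
 * with poll=100 on a HZ=1000 kernel (illustrative values) would fire
 * bnx2x_timer every 100ms.
 */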
8209
8210 /*
8211  * ethtool service functions
8212  */
8213
8214 /* All ethtool functions called with rtnl_lock */
8215
8216 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8217 {
8218         struct bnx2x *bp = netdev_priv(dev);
8219
8220         cmd->supported = bp->port.supported;
8221         cmd->advertising = bp->port.advertising;
8222
8223         if (netif_carrier_ok(dev)) {
8224                 cmd->speed = bp->link_vars.line_speed;
8225                 cmd->duplex = bp->link_vars.duplex;
8226         } else {
8227                 cmd->speed = bp->link_params.req_line_speed;
8228                 cmd->duplex = bp->link_params.req_duplex;
8229         }
8230         if (IS_E1HMF(bp)) {
8231                 u16 vn_max_rate;
8232
8233                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8234                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8235                 if (vn_max_rate < cmd->speed)
8236                         cmd->speed = vn_max_rate;
8237         }
8238
8239         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8240                 u32 ext_phy_type =
8241                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8242
8243                 switch (ext_phy_type) {
8244                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8245                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8246                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8247                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8248                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8249                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8250                         cmd->port = PORT_FIBRE;
8251                         break;
8252
8253                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8254                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8255                         cmd->port = PORT_TP;
8256                         break;
8257
8258                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8259                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8260                                   bp->link_params.ext_phy_config);
8261                         break;
8262
8263                 default:
8264                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8265                            bp->link_params.ext_phy_config);
8266                         break;
8267                 }
8268         } else
8269                 cmd->port = PORT_TP;
8270
8271         cmd->phy_address = bp->port.phy_addr;
8272         cmd->transceiver = XCVR_INTERNAL;
8273
8274         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8275                 cmd->autoneg = AUTONEG_ENABLE;
8276         else
8277                 cmd->autoneg = AUTONEG_DISABLE;
8278
8279         cmd->maxtxpkt = 0;
8280         cmd->maxrxpkt = 0;
8281
8282         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8283            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8284            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8285            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8286            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8287            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8288            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8289
8290         return 0;
8291 }
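
/* Worked example of the E1HMF cap above (field value is illustrative):
 * FUNC_MF_CFG_MAX_BW holds the per-function limit in units of 100 Mbps,
 * so a field value of 25 gives vn_max_rate = 2500 and a 10G link is then
 * reported to ethtool as 2500 for this function.
 */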
8292
8293 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8294 {
8295         struct bnx2x *bp = netdev_priv(dev);
8296         u32 advertising;
8297
8298         if (IS_E1HMF(bp))
8299                 return 0;
8300
8301         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8302            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8303            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8304            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8305            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8306            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8307            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8308
8309         if (cmd->autoneg == AUTONEG_ENABLE) {
8310                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8311                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8312                         return -EINVAL;
8313                 }
8314
8315                 /* advertise the requested speed and duplex if supported */
8316                 cmd->advertising &= bp->port.supported;
8317
8318                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8319                 bp->link_params.req_duplex = DUPLEX_FULL;
8320                 bp->port.advertising |= (ADVERTISED_Autoneg |
8321                                          cmd->advertising);
8322
8323         } else { /* forced speed */
8324                 /* advertise the requested speed and duplex if supported */
8325                 switch (cmd->speed) {
8326                 case SPEED_10:
8327                         if (cmd->duplex == DUPLEX_FULL) {
8328                                 if (!(bp->port.supported &
8329                                       SUPPORTED_10baseT_Full)) {
8330                                         DP(NETIF_MSG_LINK,
8331                                            "10M full not supported\n");
8332                                         return -EINVAL;
8333                                 }
8334
8335                                 advertising = (ADVERTISED_10baseT_Full |
8336                                                ADVERTISED_TP);
8337                         } else {
8338                                 if (!(bp->port.supported &
8339                                       SUPPORTED_10baseT_Half)) {
8340                                         DP(NETIF_MSG_LINK,
8341                                            "10M half not supported\n");
8342                                         return -EINVAL;
8343                                 }
8344
8345                                 advertising = (ADVERTISED_10baseT_Half |
8346                                                ADVERTISED_TP);
8347                         }
8348                         break;
8349
8350                 case SPEED_100:
8351                         if (cmd->duplex == DUPLEX_FULL) {
8352                                 if (!(bp->port.supported &
8353                                                 SUPPORTED_100baseT_Full)) {
8354                                         DP(NETIF_MSG_LINK,
8355                                            "100M full not supported\n");
8356                                         return -EINVAL;
8357                                 }
8358
8359                                 advertising = (ADVERTISED_100baseT_Full |
8360                                                ADVERTISED_TP);
8361                         } else {
8362                                 if (!(bp->port.supported &
8363                                                 SUPPORTED_100baseT_Half)) {
8364                                         DP(NETIF_MSG_LINK,
8365                                            "100M half not supported\n");
8366                                         return -EINVAL;
8367                                 }
8368
8369                                 advertising = (ADVERTISED_100baseT_Half |
8370                                                ADVERTISED_TP);
8371                         }
8372                         break;
8373
8374                 case SPEED_1000:
8375                         if (cmd->duplex != DUPLEX_FULL) {
8376                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8377                                 return -EINVAL;
8378                         }
8379
8380                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8381                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8382                                 return -EINVAL;
8383                         }
8384
8385                         advertising = (ADVERTISED_1000baseT_Full |
8386                                        ADVERTISED_TP);
8387                         break;
8388
8389                 case SPEED_2500:
8390                         if (cmd->duplex != DUPLEX_FULL) {
8391                                 DP(NETIF_MSG_LINK,
8392                                    "2.5G half not supported\n");
8393                                 return -EINVAL;
8394                         }
8395
8396                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8397                                 DP(NETIF_MSG_LINK,
8398                                    "2.5G full not supported\n");
8399                                 return -EINVAL;
8400                         }
8401
8402                         advertising = (ADVERTISED_2500baseX_Full |
8403                                        ADVERTISED_TP);
8404                         break;
8405
8406                 case SPEED_10000:
8407                         if (cmd->duplex != DUPLEX_FULL) {
8408                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8409                                 return -EINVAL;
8410                         }
8411
8412                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8413                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8414                                 return -EINVAL;
8415                         }
8416
8417                         advertising = (ADVERTISED_10000baseT_Full |
8418                                        ADVERTISED_FIBRE);
8419                         break;
8420
8421                 default:
8422                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8423                         return -EINVAL;
8424                 }
8425
8426                 bp->link_params.req_line_speed = cmd->speed;
8427                 bp->link_params.req_duplex = cmd->duplex;
8428                 bp->port.advertising = advertising;
8429         }
8430
8431         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8432            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8433            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8434            bp->port.advertising);
8435
8436         if (netif_running(dev)) {
8437                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8438                 bnx2x_link_set(bp);
8439         }
8440
8441         return 0;
8442 }
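
/* Usage sketch (assuming a stock ethtool binary): "ethtool -s eth0
 * autoneg on" takes the first branch and re-enables autoneg with the
 * advertised mask clipped to bp->port.supported, while "ethtool -s eth0
 * speed 10000 duplex full autoneg off" takes the forced-speed switch and,
 * if SUPPORTED_10000baseT_Full is set, restarts the link at 10G.
 */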
8443
8444 #define PHY_FW_VER_LEN                  10
8445
8446 static void bnx2x_get_drvinfo(struct net_device *dev,
8447                               struct ethtool_drvinfo *info)
8448 {
8449         struct bnx2x *bp = netdev_priv(dev);
8450         u8 phy_fw_ver[PHY_FW_VER_LEN];
8451
8452         strcpy(info->driver, DRV_MODULE_NAME);
8453         strcpy(info->version, DRV_MODULE_VERSION);
8454
8455         phy_fw_ver[0] = '\0';
8456         if (bp->port.pmf) {
8457                 bnx2x_acquire_phy_lock(bp);
8458                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8459                                              (bp->state != BNX2X_STATE_CLOSED),
8460                                              phy_fw_ver, PHY_FW_VER_LEN);
8461                 bnx2x_release_phy_lock(bp);
8462         }
8463
8464         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8465                  (bp->common.bc_ver & 0xff0000) >> 16,
8466                  (bp->common.bc_ver & 0xff00) >> 8,
8467                  (bp->common.bc_ver & 0xff),
8468                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8469         strcpy(info->bus_info, pci_name(bp->pdev));
8470         info->n_stats = BNX2X_NUM_STATS;
8471         info->testinfo_len = BNX2X_NUM_TESTS;
8472         info->eedump_len = bp->common.flash_size;
8473         info->regdump_len = 0;
8474 }
8475
8476 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8477 {
8478         struct bnx2x *bp = netdev_priv(dev);
8479
8480         if (bp->flags & NO_WOL_FLAG) {
8481                 wol->supported = 0;
8482                 wol->wolopts = 0;
8483         } else {
8484                 wol->supported = WAKE_MAGIC;
8485                 if (bp->wol)
8486                         wol->wolopts = WAKE_MAGIC;
8487                 else
8488                         wol->wolopts = 0;
8489         }
8490         memset(&wol->sopass, 0, sizeof(wol->sopass));
8491 }
8492
8493 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8494 {
8495         struct bnx2x *bp = netdev_priv(dev);
8496
8497         if (wol->wolopts & ~WAKE_MAGIC)
8498                 return -EINVAL;
8499
8500         if (wol->wolopts & WAKE_MAGIC) {
8501                 if (bp->flags & NO_WOL_FLAG)
8502                         return -EINVAL;
8503
8504                 bp->wol = 1;
8505         } else
8506                 bp->wol = 0;
8507
8508         return 0;
8509 }
8510
8511 static u32 bnx2x_get_msglevel(struct net_device *dev)
8512 {
8513         struct bnx2x *bp = netdev_priv(dev);
8514
8515         return bp->msglevel;
8516 }
8517
8518 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8519 {
8520         struct bnx2x *bp = netdev_priv(dev);
8521
8522         if (capable(CAP_NET_ADMIN))
8523                 bp->msglevel = level;
8524 }
8525
8526 static int bnx2x_nway_reset(struct net_device *dev)
8527 {
8528         struct bnx2x *bp = netdev_priv(dev);
8529
8530         if (!bp->port.pmf)
8531                 return 0;
8532
8533         if (netif_running(dev)) {
8534                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8535                 bnx2x_link_set(bp);
8536         }
8537
8538         return 0;
8539 }
8540
8541 static int bnx2x_get_eeprom_len(struct net_device *dev)
8542 {
8543         struct bnx2x *bp = netdev_priv(dev);
8544
8545         return bp->common.flash_size;
8546 }
8547
8548 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8549 {
8550         int port = BP_PORT(bp);
8551         int count, i;
8552         u32 val = 0;
8553
8554         /* adjust timeout for emulation/FPGA */
8555         count = NVRAM_TIMEOUT_COUNT;
8556         if (CHIP_REV_IS_SLOW(bp))
8557                 count *= 100;
8558
8559         /* request access to nvram interface */
8560         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8561                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8562
8563         for (i = 0; i < count*10; i++) {
8564                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8565                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8566                         break;
8567
8568                 udelay(5);
8569         }
8570
8571         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8572                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8573                 return -EBUSY;
8574         }
8575
8576         return 0;
8577 }
8578
8579 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8580 {
8581         int port = BP_PORT(bp);
8582         int count, i;
8583         u32 val = 0;
8584
8585         /* adjust timeout for emulation/FPGA */
8586         count = NVRAM_TIMEOUT_COUNT;
8587         if (CHIP_REV_IS_SLOW(bp))
8588                 count *= 100;
8589
8590         /* relinquish nvram interface */
8591         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8592                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8593
8594         for (i = 0; i < count*10; i++) {
8595                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8596                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8597                         break;
8598
8599                 udelay(5);
8600         }
8601
8602         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8603                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8604                 return -EBUSY;
8605         }
8606
8607         return 0;
8608 }
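
/* The two helpers above implement the software arbitration handshake for
 * the shared NVRAM: set the per-port REQ bit, then poll for the matching
 * ARB grant bit. Worst case each loop spins count*10 iterations of
 * udelay(5), i.e. 50 * NVRAM_TIMEOUT_COUNT microseconds (100x longer on
 * emulation/FPGA), before giving up with -EBUSY.
 */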
8609
8610 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8611 {
8612         u32 val;
8613
8614         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8615
8616         /* enable both bits, even on read */
8617         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8618                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8619                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8620 }
8621
8622 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8623 {
8624         u32 val;
8625
8626         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8627
8628         /* disable both bits, even after read */
8629         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8630                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8631                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8632 }
8633
8634 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8635                                   u32 cmd_flags)
8636 {
8637         int count, i, rc;
8638         u32 val;
8639
8640         /* build the command word */
8641         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8642
8643         /* need to clear DONE bit separately */
8644         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8645
8646         /* address of the NVRAM to read from */
8647         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8648                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8649
8650         /* issue a read command */
8651         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8652
8653         /* adjust timeout for emulation/FPGA */
8654         count = NVRAM_TIMEOUT_COUNT;
8655         if (CHIP_REV_IS_SLOW(bp))
8656                 count *= 100;
8657
8658         /* wait for completion */
8659         *ret_val = 0;
8660         rc = -EBUSY;
8661         for (i = 0; i < count; i++) {
8662                 udelay(5);
8663                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8664
8665                 if (val & MCPR_NVM_COMMAND_DONE) {
8666                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8667                         /* we read nvram data in cpu order,
8668                          * but ethtool sees it as an array of bytes;
8669                          * converting to big-endian does the work */
8670                         *ret_val = cpu_to_be32(val);
8671                         rc = 0;
8672                         break;
8673                 }
8674         }
8675
8676         return rc;
8677 }
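
/* Byte-order sketch for the read above: the NVM_READ register returns the
 * dword in cpu order with the lowest-addressed flash byte in the most
 * significant position -- for the flash signature the register reads
 * 0x669955aa and cpu_to_be32() lays it out in memory as 66 99 55 aa,
 * the byte sequence "ethtool -e" displays (inferred from the magic check
 * in bnx2x_test_nvram below).
 */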
8678
8679 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8680                             int buf_size)
8681 {
8682         int rc;
8683         u32 cmd_flags;
8684         __be32 val;
8685
8686         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8687                 DP(BNX2X_MSG_NVM,
8688                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8689                    offset, buf_size);
8690                 return -EINVAL;
8691         }
8692
8693         if (offset + buf_size > bp->common.flash_size) {
8694                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8695                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8696                    offset, buf_size, bp->common.flash_size);
8697                 return -EINVAL;
8698         }
8699
8700         /* request access to nvram interface */
8701         rc = bnx2x_acquire_nvram_lock(bp);
8702         if (rc)
8703                 return rc;
8704
8705         /* enable access to nvram interface */
8706         bnx2x_enable_nvram_access(bp);
8707
8708         /* read the first word(s) */
8709         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8710         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8711                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8712                 memcpy(ret_buf, &val, 4);
8713
8714                 /* advance to the next dword */
8715                 offset += sizeof(u32);
8716                 ret_buf += sizeof(u32);
8717                 buf_size -= sizeof(u32);
8718                 cmd_flags = 0;
8719         }
8720
8721         if (rc == 0) {
8722                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8723                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8724                 memcpy(ret_buf, &val, 4);
8725         }
8726
8727         /* disable access to nvram interface */
8728         bnx2x_disable_nvram_access(bp);
8729         bnx2x_release_nvram_lock(bp);
8730
8731         return rc;
8732 }
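
/* The FIRST/LAST command flags above bracket a multi-dword burst: FIRST
 * goes only on the initial dword and LAST on the final one, which
 * presumably lets the NVM controller keep the flash selected across the
 * whole transfer; every intermediate dword is issued with cmd_flags = 0.
 */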
8733
8734 static int bnx2x_get_eeprom(struct net_device *dev,
8735                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8736 {
8737         struct bnx2x *bp = netdev_priv(dev);
8738         int rc;
8739
8740         if (!netif_running(dev))
8741                 return -EAGAIN;
8742
8743         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8744            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8745            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8746            eeprom->len, eeprom->len);
8747
8748         /* parameters already validated in ethtool_get_eeprom */
8749
8750         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8751
8752         return rc;
8753 }
8754
8755 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8756                                    u32 cmd_flags)
8757 {
8758         int count, i, rc;
8759
8760         /* build the command word */
8761         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8762
8763         /* need to clear DONE bit separately */
8764         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8765
8766         /* write the data */
8767         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8768
8769         /* address of the NVRAM to write to */
8770         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8771                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8772
8773         /* issue the write command */
8774         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8775
8776         /* adjust timeout for emulation/FPGA */
8777         count = NVRAM_TIMEOUT_COUNT;
8778         if (CHIP_REV_IS_SLOW(bp))
8779                 count *= 100;
8780
8781         /* wait for completion */
8782         rc = -EBUSY;
8783         for (i = 0; i < count; i++) {
8784                 udelay(5);
8785                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8786                 if (val & MCPR_NVM_COMMAND_DONE) {
8787                         rc = 0;
8788                         break;
8789                 }
8790         }
8791
8792         return rc;
8793 }
8794
8795 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8796
8797 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8798                               int buf_size)
8799 {
8800         int rc;
8801         u32 cmd_flags;
8802         u32 align_offset;
8803         __be32 val;
8804
8805         if (offset + buf_size > bp->common.flash_size) {
8806                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8807                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8808                    offset, buf_size, bp->common.flash_size);
8809                 return -EINVAL;
8810         }
8811
8812         /* request access to nvram interface */
8813         rc = bnx2x_acquire_nvram_lock(bp);
8814         if (rc)
8815                 return rc;
8816
8817         /* enable access to nvram interface */
8818         bnx2x_enable_nvram_access(bp);
8819
8820         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8821         align_offset = (offset & ~0x03);
8822         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8823
8824         if (rc == 0) {
8825                 val &= ~(0xff << BYTE_OFFSET(offset));
8826                 val |= (*data_buf << BYTE_OFFSET(offset));
8827
8828                 /* nvram data is returned as an array of bytes;
8829                  * convert it back to cpu order */
8830                 val = be32_to_cpu(val);
8831
8832                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8833                                              cmd_flags);
8834         }
8835
8836         /* disable access to nvram interface */
8837         bnx2x_disable_nvram_access(bp);
8838         bnx2x_release_nvram_lock(bp);
8839
8840         return rc;
8841 }
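
/* Read-modify-write example for the single-byte path above (the offset is
 * illustrative): writing one byte at offset 0x102 reads the aligned dword
 * at 0x100, and BYTE_OFFSET(0x102) = 8 * 2 = 16, so the mask 0xff << 16
 * replaces bits 23:16 of the big-endian dword before it is converted back
 * and written out.
 */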
8842
8843 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8844                              int buf_size)
8845 {
8846         int rc;
8847         u32 cmd_flags;
8848         u32 val;
8849         u32 written_so_far;
8850
8851         if (buf_size == 1)      /* single-byte write from ethtool */
8852                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8853
8854         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8855                 DP(BNX2X_MSG_NVM,
8856                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8857                    offset, buf_size);
8858                 return -EINVAL;
8859         }
8860
8861         if (offset + buf_size > bp->common.flash_size) {
8862                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8863                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8864                    offset, buf_size, bp->common.flash_size);
8865                 return -EINVAL;
8866         }
8867
8868         /* request access to nvram interface */
8869         rc = bnx2x_acquire_nvram_lock(bp);
8870         if (rc)
8871                 return rc;
8872
8873         /* enable access to nvram interface */
8874         bnx2x_enable_nvram_access(bp);
8875
8876         written_so_far = 0;
8877         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8878         while ((written_so_far < buf_size) && (rc == 0)) {
8879                 if (written_so_far == (buf_size - sizeof(u32)))
8880                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8881                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8882                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8883                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8884                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8885
8886                 memcpy(&val, data_buf, 4);
8887
8888                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8889
8890                 /* advance to the next dword */
8891                 offset += sizeof(u32);
8892                 data_buf += sizeof(u32);
8893                 written_so_far += sizeof(u32);
8894                 cmd_flags = 0;
8895         }
8896
8897         /* disable access to nvram interface */
8898         bnx2x_disable_nvram_access(bp);
8899         bnx2x_release_nvram_lock(bp);
8900
8901         return rc;
8902 }
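
/* Page handling in the write loop above: LAST is asserted on the final
 * dword of the buffer and also whenever (offset + 4) reaches a
 * NVRAM_PAGE_SIZE boundary, with FIRST re-asserted at the start of the
 * next page -- a large write thus reaches the flash as a sequence of
 * page-sized bursts.
 */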
8903
8904 static int bnx2x_set_eeprom(struct net_device *dev,
8905                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8906 {
8907         struct bnx2x *bp = netdev_priv(dev);
8908         int rc;
8909
8910         if (!netif_running(dev))
8911                 return -EAGAIN;
8912
8913         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8914            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8915            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8916            eeprom->len, eeprom->len);
8917
8918         /* parameters already validated in ethtool_set_eeprom */
8919
8920         /* If the magic number is PHY (0x00504859), upgrade the PHY FW */
8921         if (eeprom->magic == 0x00504859)
8922                 if (bp->port.pmf) {
8923
8924                         bnx2x_acquire_phy_lock(bp);
8925                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8926                                              bp->link_params.ext_phy_config,
8927                                              (bp->state != BNX2X_STATE_CLOSED),
8928                                              eebuf, eeprom->len);
8929                         if ((bp->state == BNX2X_STATE_OPEN) ||
8930                             (bp->state == BNX2X_STATE_DISABLED)) {
8931                                 rc |= bnx2x_link_reset(&bp->link_params,
8932                                                        &bp->link_vars, 1);
8933                                 rc |= bnx2x_phy_init(&bp->link_params,
8934                                                      &bp->link_vars);
8935                         }
8936                         bnx2x_release_phy_lock(bp);
8937
8938                 } else /* Only the PMF can access the PHY */
8939                         return -EINVAL;
8940         else
8941                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8942
8943         return rc;
8944 }
8945
8946 static int bnx2x_get_coalesce(struct net_device *dev,
8947                               struct ethtool_coalesce *coal)
8948 {
8949         struct bnx2x *bp = netdev_priv(dev);
8950
8951         memset(coal, 0, sizeof(struct ethtool_coalesce));
8952
8953         coal->rx_coalesce_usecs = bp->rx_ticks;
8954         coal->tx_coalesce_usecs = bp->tx_ticks;
8955
8956         return 0;
8957 }
8958
8959 static int bnx2x_set_coalesce(struct net_device *dev,
8960                               struct ethtool_coalesce *coal)
8961 {
8962         struct bnx2x *bp = netdev_priv(dev);
8963
8964         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8965         if (bp->rx_ticks > 3000)
8966                 bp->rx_ticks = 3000;
8967
8968         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8969         if (bp->tx_ticks > 0x3000)
8970                 bp->tx_ticks = 0x3000;
8971
8972         if (netif_running(dev))
8973                 bnx2x_update_coalesce(bp);
8974
8975         return 0;
8976 }
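
/* Note the asymmetric clamps above: rx_ticks is capped at decimal 3000
 * while tx_ticks is capped at hex 0x3000 (12288). Both are microsecond
 * values handed to bnx2x_update_coalesce(); whether the mismatch is
 * intentional is not evident from this file.
 */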
8977
8978 static void bnx2x_get_ringparam(struct net_device *dev,
8979                                 struct ethtool_ringparam *ering)
8980 {
8981         struct bnx2x *bp = netdev_priv(dev);
8982
8983         ering->rx_max_pending = MAX_RX_AVAIL;
8984         ering->rx_mini_max_pending = 0;
8985         ering->rx_jumbo_max_pending = 0;
8986
8987         ering->rx_pending = bp->rx_ring_size;
8988         ering->rx_mini_pending = 0;
8989         ering->rx_jumbo_pending = 0;
8990
8991         ering->tx_max_pending = MAX_TX_AVAIL;
8992         ering->tx_pending = bp->tx_ring_size;
8993 }
8994
8995 static int bnx2x_set_ringparam(struct net_device *dev,
8996                                struct ethtool_ringparam *ering)
8997 {
8998         struct bnx2x *bp = netdev_priv(dev);
8999         int rc = 0;
9000
9001         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9002             (ering->tx_pending > MAX_TX_AVAIL) ||
9003             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9004                 return -EINVAL;
9005
9006         bp->rx_ring_size = ering->rx_pending;
9007         bp->tx_ring_size = ering->tx_pending;
9008
9009         if (netif_running(dev)) {
9010                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9011                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9012         }
9013
9014         return rc;
9015 }
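
/* The tx_pending lower bound above (MAX_SKB_FRAGS + 4) keeps the ring
 * large enough for one worst-case packet: a single skb can consume a BD
 * per page fragment plus a few more for the linear part and control
 * descriptors (treating the extra 4 as that headroom is an assumption
 * here, not spelled out in this file).
 */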
9016
9017 static void bnx2x_get_pauseparam(struct net_device *dev,
9018                                  struct ethtool_pauseparam *epause)
9019 {
9020         struct bnx2x *bp = netdev_priv(dev);
9021
9022         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9023                            BNX2X_FLOW_CTRL_AUTO) &&
9024                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9025
9026         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9027                             BNX2X_FLOW_CTRL_RX);
9028         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9029                             BNX2X_FLOW_CTRL_TX);
9030
9031         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9032            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9033            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9034 }
9035
9036 static int bnx2x_set_pauseparam(struct net_device *dev,
9037                                 struct ethtool_pauseparam *epause)
9038 {
9039         struct bnx2x *bp = netdev_priv(dev);
9040
9041         if (IS_E1HMF(bp))
9042                 return 0;
9043
9044         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9045            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9046            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9047
9048         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9049
9050         if (epause->rx_pause)
9051                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9052
9053         if (epause->tx_pause)
9054                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9055
9056         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9057                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9058
9059         if (epause->autoneg) {
9060                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9061                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9062                         return -EINVAL;
9063                 }
9064
9065                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9066                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9067         }
9068
9069         DP(NETIF_MSG_LINK,
9070            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9071
9072         if (netif_running(dev)) {
9073                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9074                 bnx2x_link_set(bp);
9075         }
9076
9077         return 0;
9078 }
9079
9080 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9081 {
9082         struct bnx2x *bp = netdev_priv(dev);
9083         int changed = 0;
9084         int rc = 0;
9085
9086         /* TPA requires Rx CSUM offloading */
9087         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9088                 if (!(dev->features & NETIF_F_LRO)) {
9089                         dev->features |= NETIF_F_LRO;
9090                         bp->flags |= TPA_ENABLE_FLAG;
9091                         changed = 1;
9092                 }
9093
9094         } else if (dev->features & NETIF_F_LRO) {
9095                 dev->features &= ~NETIF_F_LRO;
9096                 bp->flags &= ~TPA_ENABLE_FLAG;
9097                 changed = 1;
9098         }
9099
9100         if (changed && netif_running(dev)) {
9101                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9102                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9103         }
9104
9105         return rc;
9106 }
9107
9108 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9109 {
9110         struct bnx2x *bp = netdev_priv(dev);
9111
9112         return bp->rx_csum;
9113 }
9114
9115 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9116 {
9117         struct bnx2x *bp = netdev_priv(dev);
9118         int rc = 0;
9119
9120         bp->rx_csum = data;
9121
9122         /* Disable TPA when Rx CSUM is disabled; otherwise all
9123            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9124         if (!data) {
9125                 u32 flags = ethtool_op_get_flags(dev);
9126
9127                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9128         }
9129
9130         return rc;
9131 }
9132
9133 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9134 {
9135         if (data) {
9136                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9137                 dev->features |= NETIF_F_TSO6;
9138         } else {
9139                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9140                 dev->features &= ~NETIF_F_TSO6;
9141         }
9142
9143         return 0;
9144 }
9145
9146 static const struct {
9147         char string[ETH_GSTRING_LEN];
9148 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9149         { "register_test (offline)" },
9150         { "memory_test (offline)" },
9151         { "loopback_test (offline)" },
9152         { "nvram_test (online)" },
9153         { "interrupt_test (online)" },
9154         { "link_test (online)" },
9155         { "idle check (online)" }
9156 };
9157
9158 static int bnx2x_self_test_count(struct net_device *dev)
9159 {
9160         return BNX2X_NUM_TESTS;
9161 }
9162
9163 static int bnx2x_test_registers(struct bnx2x *bp)
9164 {
9165         int idx, i, rc = -ENODEV;
9166         u32 wr_val = 0;
9167         int port = BP_PORT(bp);
9168         static const struct {
9169                 u32  offset0;
9170                 u32  offset1;
9171                 u32  mask;
9172         } reg_tbl[] = {
9173 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9174                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9175                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9176                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9177                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9178                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9179                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9180                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9181                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9182                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9183 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9184                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9185                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9186                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9187                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9188                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9189                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9190                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9191                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9192                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9193 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9194                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9195                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9196                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9197                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9198                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9199                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9200                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9201                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9202                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9203 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9204                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9205                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9206                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9207                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9208                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9209                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9210                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9211
9212                 { 0xffffffff, 0, 0x00000000 }
9213         };
9214
9215         if (!netif_running(bp->dev))
9216                 return rc;
9217
9218         /* Run the test twice:
9219            first writing 0x00000000, then writing 0xffffffff */
9220         for (idx = 0; idx < 2; idx++) {
9221
9222                 switch (idx) {
9223                 case 0:
9224                         wr_val = 0;
9225                         break;
9226                 case 1:
9227                         wr_val = 0xffffffff;
9228                         break;
9229                 }
9230
9231                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9232                         u32 offset, mask, save_val, val;
9233
9234                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9235                         mask = reg_tbl[i].mask;
9236
9237                         save_val = REG_RD(bp, offset);
9238
9239                         REG_WR(bp, offset, wr_val);
9240                         val = REG_RD(bp, offset);
9241
9242                         /* Restore the original register's value */
9243                         REG_WR(bp, offset, save_val);
9244
9245                         /* verify that the value reads back as expected */
9246                         if ((val & mask) != (wr_val & mask))
9247                                 goto test_reg_exit;
9248                 }
9249         }
9250
9251         rc = 0;
9252
9253 test_reg_exit:
9254         return rc;
9255 }
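
/* reg_tbl layout above: offset1 is the per-port stride, so the register
 * actually exercised is offset0 + port * offset1 -- e.g. on port 1 the
 * first entry touches BRB1_REG_PAUSE_LOW_THRESHOLD_0 + 4. Each register
 * is written with the 0x00000000 and 0xffffffff patterns and read back
 * under its mask, with the original value restored in between.
 */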
9256
9257 static int bnx2x_test_memory(struct bnx2x *bp)
9258 {
9259         int i, j, rc = -ENODEV;
9260         u32 val;
9261         static const struct {
9262                 u32 offset;
9263                 int size;
9264         } mem_tbl[] = {
9265                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9266                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9267                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9268                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9269                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9270                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9271                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9272
9273                 { 0xffffffff, 0 }
9274         };
9275         static const struct {
9276                 char *name;
9277                 u32 offset;
9278                 u32 e1_mask;
9279                 u32 e1h_mask;
9280         } prty_tbl[] = {
9281                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9282                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9283                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9284                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9285                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9286                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9287
9288                 { NULL, 0xffffffff, 0, 0 }
9289         };
9290
9291         if (!netif_running(bp->dev))
9292                 return rc;
9293
9294         /* Go through all the memories */
9295         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9296                 for (j = 0; j < mem_tbl[i].size; j++)
9297                         REG_RD(bp, mem_tbl[i].offset + j*4);
9298
9299         /* Check the parity status */
9300         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9301                 val = REG_RD(bp, prty_tbl[i].offset);
9302                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9303                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9304                         DP(NETIF_MSG_HW,
9305                            "%s is 0x%x\n", prty_tbl[i].name, val);
9306                         goto test_mem_exit;
9307                 }
9308         }
9309
9310         rc = 0;
9311
9312 test_mem_exit:
9313         return rc;
9314 }
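
/* The memory test above is a read exercise: every word of each internal
 * memory is read once (any latent parity error should latch into the
 * matching PRTY_STS register), then the parity status registers are
 * checked against per-chip masks -- bits set in e1_mask/e1h_mask are
 * treated as expected on that chip revision and ignored.
 */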
9315
9316 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9317 {
9318         int cnt = 1000;
9319
9320         if (link_up)
9321                 while (bnx2x_link_test(bp) && cnt--)
9322                         msleep(10);
9323 }
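
/* When link is expected up, the helper above polls bnx2x_link_test()
 * every 10 ms for up to ~10 seconds (1000 iterations) and returns without
 * reporting a timeout either way.
 */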
9324
9325 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9326 {
9327         unsigned int pkt_size, num_pkts, i;
9328         struct sk_buff *skb;
9329         unsigned char *packet;
9330         struct bnx2x_fastpath *fp = &bp->fp[0];
9331         u16 tx_start_idx, tx_idx;
9332         u16 rx_start_idx, rx_idx;
9333         u16 pkt_prod;
9334         struct sw_tx_bd *tx_buf;
9335         struct eth_tx_bd *tx_bd;
9336         dma_addr_t mapping;
9337         union eth_rx_cqe *cqe;
9338         u8 cqe_fp_flags;
9339         struct sw_rx_bd *rx_buf;
9340         u16 len;
9341         int rc = -ENODEV;
9342
9343         /* check the loopback mode */
9344         switch (loopback_mode) {
9345         case BNX2X_PHY_LOOPBACK:
9346                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9347                         return -EINVAL;
9348                 break;
9349         case BNX2X_MAC_LOOPBACK:
9350                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9351                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9352                 break;
9353         default:
9354                 return -EINVAL;
9355         }
9356
9357         /* prepare the loopback packet */
9358         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9359                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9360         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9361         if (!skb) {
9362                 rc = -ENOMEM;
9363                 goto test_loopback_exit;
9364         }
9365         packet = skb_put(skb, pkt_size);
9366         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9367         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9368         for (i = ETH_HLEN; i < pkt_size; i++)
9369                 packet[i] = (unsigned char) (i & 0xff);
9370
9371         /* send the loopback packet */
9372         num_pkts = 0;
9373         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9374         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9375
9376         pkt_prod = fp->tx_pkt_prod++;
9377         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9378         tx_buf->first_bd = fp->tx_bd_prod;
9379         tx_buf->skb = skb;
9380
9381         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9382         mapping = pci_map_single(bp->pdev, skb->data,
9383                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9384         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9385         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9386         tx_bd->nbd = cpu_to_le16(1);
9387         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9388         tx_bd->vlan = cpu_to_le16(pkt_prod);
9389         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9390                                        ETH_TX_BD_FLAGS_END_BD);
9391         tx_bd->general_data = ((UNICAST_ADDRESS <<
9392                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9393
9394         wmb();
9395
9396         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9397         mb(); /* FW restriction: must not reorder writing nbd and packets */
9398         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9399         DOORBELL(bp, fp->index, 0);
9400
9401         mmiowb();
9402
9403         num_pkts++;
9404         fp->tx_bd_prod++;
9405         bp->dev->trans_start = jiffies;
9406
9407         udelay(100);
9408
9409         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9410         if (tx_idx != tx_start_idx + num_pkts)
9411                 goto test_loopback_exit;
9412
9413         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9414         if (rx_idx != rx_start_idx + num_pkts)
9415                 goto test_loopback_exit;
9416
9417         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9418         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9419         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9420                 goto test_loopback_rx_exit;
9421
9422         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9423         if (len != pkt_size)
9424                 goto test_loopback_rx_exit;
9425
9426         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9427         skb = rx_buf->skb;
9428         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9429         for (i = ETH_HLEN; i < pkt_size; i++)
9430                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9431                         goto test_loopback_rx_exit;
9432
9433         rc = 0;
9434
9435 test_loopback_rx_exit:
9436
9437         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9438         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9439         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9440         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9441
9442         /* Update producers */
9443         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9444                              fp->rx_sge_prod);
9445
9446 test_loopback_exit:
9447         bp->link_params.loopback_mode = LOOPBACK_NONE;
9448
9449         return rc;
9450 }
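
/*
 * A note on the test frame built in bnx2x_run_loopback(): it is
 * self-describing - the destination MAC is the device's own address,
 * the rest of the header is zeroed, and payload byte i carries
 * (i & 0xff).  The RX side can therefore validate the echoed frame
 * byte-by-byte without keeping a copy of the original.  A minimal
 * sketch of the same check (the driver itself jumps to
 * test_loopback_rx_exit on a mismatch rather than returning):
 *
 *	for (i = ETH_HLEN; i < pkt_size; i++)
 *		if (data[i] != (unsigned char)(i & 0xff))
 *			return -EIO;
 */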
9451
9452 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9453 {
9454         int rc = 0, res;
9455
9456         if (!netif_running(bp->dev))
9457                 return BNX2X_LOOPBACK_FAILED;
9458
9459         bnx2x_netif_stop(bp, 1);
9460         bnx2x_acquire_phy_lock(bp);
9461
9462         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9463         if (res) {
9464                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9465                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9466         }
9467
9468         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9469         if (res) {
9470                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9471                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9472         }
9473
9474         bnx2x_release_phy_lock(bp);
9475         bnx2x_netif_start(bp);
9476
9477         return rc;
9478 }
9479
9480 #define CRC32_RESIDUAL                  0xdebb20e3
9481
9482 static int bnx2x_test_nvram(struct bnx2x *bp)
9483 {
9484         static const struct {
9485                 int offset;
9486                 int size;
9487         } nvram_tbl[] = {
9488                 {     0,  0x14 }, /* bootstrap */
9489                 {  0x14,  0xec }, /* dir */
9490                 { 0x100, 0x350 }, /* manuf_info */
9491                 { 0x450,  0xf0 }, /* feature_info */
9492                 { 0x640,  0x64 }, /* upgrade_key_info */
9493                 { 0x6a4,  0x64 },
9494                 { 0x708,  0x70 }, /* manuf_key_info */
9495                 { 0x778,  0x70 },
9496                 {     0,     0 }
9497         };
9498         __be32 buf[0x350 / 4];
9499         u8 *data = (u8 *)buf;
9500         int i, rc;
9501         u32 magic, csum;
9502
9503         rc = bnx2x_nvram_read(bp, 0, data, 4);
9504         if (rc) {
9505                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9506                 goto test_nvram_exit;
9507         }
9508
9509         magic = be32_to_cpu(buf[0]);
9510         if (magic != 0x669955aa) {
9511                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9512                 rc = -ENODEV;
9513                 goto test_nvram_exit;
9514         }
9515
9516         for (i = 0; nvram_tbl[i].size; i++) {
9517
9518                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9519                                       nvram_tbl[i].size);
9520                 if (rc) {
9521                         DP(NETIF_MSG_PROBE,
9522                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9523                         goto test_nvram_exit;
9524                 }
9525
9526                 csum = ether_crc_le(nvram_tbl[i].size, data);
9527                 if (csum != CRC32_RESIDUAL) {
9528                         DP(NETIF_MSG_PROBE,
9529                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9530                         rc = -ENODEV;
9531                         goto test_nvram_exit;
9532                 }
9533         }
9534
9535 test_nvram_exit:
9536         return rc;
9537 }
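
/*
 * Why a single CRC32_RESIDUAL constant works in bnx2x_test_nvram():
 * each NVRAM region evidently stores its own little-endian CRC32 in
 * its trailing bytes, and running a CRC over data-plus-appended-CRC
 * yields a fixed residue (0xdebb20e3 for this CRC32 variant) exactly
 * when the data is intact, so no per-region expected value is needed.
 * A sketch of the pattern, assuming a region layout of payload
 * followed by its 4-byte CRC:
 *
 *	static bool region_intact(const u8 *region, int len)
 *	{
 *		return ether_crc_le(len, region) == CRC32_RESIDUAL;
 *	}
 */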
9538
9539 static int bnx2x_test_intr(struct bnx2x *bp)
9540 {
9541         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9542         int i, rc;
9543
9544         if (!netif_running(bp->dev))
9545                 return -ENODEV;
9546
9547         config->hdr.length = 0;
9548         if (CHIP_IS_E1(bp))
9549                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9550         else
9551                 config->hdr.offset = BP_FUNC(bp);
9552         config->hdr.client_id = bp->fp->cl_id;
9553         config->hdr.reserved1 = 0;
9554
9555         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9556                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9557                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9558         if (rc == 0) {
9559                 bp->set_mac_pending++;
9560                 for (i = 0; i < 10; i++) {
9561                         if (!bp->set_mac_pending)
9562                                 break;
9563                         msleep_interruptible(10);
9564                 }
9565                 if (i == 10)
9566                         rc = -ENODEV;
9567         }
9568
9569         return rc;
9570 }
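
/*
 * The wait loop in bnx2x_test_intr() is a simple bounded poll: the
 * slowpath completion handler is expected to clear
 * bp->set_mac_pending, and the test allows roughly 100 ms
 * (10 iterations of msleep_interruptible(10)) before giving up with
 * -ENODEV.
 */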
9571
9572 static void bnx2x_self_test(struct net_device *dev,
9573                             struct ethtool_test *etest, u64 *buf)
9574 {
9575         struct bnx2x *bp = netdev_priv(dev);
9576
9577         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9578
9579         if (!netif_running(dev))
9580                 return;
9581
9582         /* offline tests are not supported in MF mode */
9583         if (IS_E1HMF(bp))
9584                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9585
9586         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9587                 u8 link_up;
9588
9589                 link_up = bp->link_vars.link_up;
9590                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9591                 bnx2x_nic_load(bp, LOAD_DIAG);
9592                 /* wait until link state is restored */
9593                 bnx2x_wait_for_link(bp, link_up);
9594
9595                 if (bnx2x_test_registers(bp) != 0) {
9596                         buf[0] = 1;
9597                         etest->flags |= ETH_TEST_FL_FAILED;
9598                 }
9599                 if (bnx2x_test_memory(bp) != 0) {
9600                         buf[1] = 1;
9601                         etest->flags |= ETH_TEST_FL_FAILED;
9602                 }
9603                 buf[2] = bnx2x_test_loopback(bp, link_up);
9604                 if (buf[2] != 0)
9605                         etest->flags |= ETH_TEST_FL_FAILED;
9606
9607                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9608                 bnx2x_nic_load(bp, LOAD_NORMAL);
9609                 /* wait until link state is restored */
9610                 bnx2x_wait_for_link(bp, link_up);
9611         }
9612         if (bnx2x_test_nvram(bp) != 0) {
9613                 buf[3] = 1;
9614                 etest->flags |= ETH_TEST_FL_FAILED;
9615         }
9616         if (bnx2x_test_intr(bp) != 0) {
9617                 buf[4] = 1;
9618                 etest->flags |= ETH_TEST_FL_FAILED;
9619         }
9620         if (bp->port.pmf)
9621                 if (bnx2x_link_test(bp) != 0) {
9622                         buf[5] = 1;
9623                         etest->flags |= ETH_TEST_FL_FAILED;
9624                 }
9625
9626 #ifdef BNX2X_EXTRA_DEBUG
9627         bnx2x_panic_dump(bp);
9628 #endif
9629 }
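
/*
 * Result-slot layout used by bnx2x_self_test() above:
 *	buf[0] - register test		buf[1] - memory test
 *	buf[2] - loopback test(s)	buf[3] - nvram test
 *	buf[4] - interrupt test		buf[5] - link test (PMF only)
 */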
9630
9631 static const struct {
9632         long offset;
9633         int size;
9634         u8 string[ETH_GSTRING_LEN];
9635 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9636 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9637         { Q_STATS_OFFSET32(error_bytes_received_hi),
9638                                                 8, "[%d]: rx_error_bytes" },
9639         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9640                                                 8, "[%d]: rx_ucast_packets" },
9641         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9642                                                 8, "[%d]: rx_mcast_packets" },
9643         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9644                                                 8, "[%d]: rx_bcast_packets" },
9645         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9646         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9647                                          4, "[%d]: rx_phy_ip_err_discards"},
9648         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9649                                          4, "[%d]: rx_skb_alloc_discard" },
9650         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9651
9652 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9653         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9654                                                         8, "[%d]: tx_packets" }
9655 };
9656
9657 static const struct {
9658         long offset;
9659         int size;
9660         u32 flags;
9661 #define STATS_FLAGS_PORT                1
9662 #define STATS_FLAGS_FUNC                2
9663 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9664         u8 string[ETH_GSTRING_LEN];
9665 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9666 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9667                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9668         { STATS_OFFSET32(error_bytes_received_hi),
9669                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9670         { STATS_OFFSET32(total_unicast_packets_received_hi),
9671                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9672         { STATS_OFFSET32(total_multicast_packets_received_hi),
9673                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9674         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9675                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9676         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9677                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9678         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9679                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9680         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9681                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9682         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9683                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9684 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9685                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9686         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9687                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9688         { STATS_OFFSET32(no_buff_discard_hi),
9689                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9690         { STATS_OFFSET32(mac_filter_discard),
9691                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9692         { STATS_OFFSET32(xxoverflow_discard),
9693                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9694         { STATS_OFFSET32(brb_drop_hi),
9695                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9696         { STATS_OFFSET32(brb_truncate_hi),
9697                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9698         { STATS_OFFSET32(pause_frames_received_hi),
9699                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9700         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9701                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9702         { STATS_OFFSET32(nig_timer_max),
9703                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9704 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9705                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9706         { STATS_OFFSET32(rx_skb_alloc_failed),
9707                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9708         { STATS_OFFSET32(hw_csum_err),
9709                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9710
9711         { STATS_OFFSET32(total_bytes_transmitted_hi),
9712                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9713         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9714                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9715         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9716                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9717         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9718                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9719         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9720                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9721         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9722                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9723         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9724                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9725 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9726                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9727         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9728                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9729         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9730                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9731         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9732                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9733         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9734                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9735         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9736                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9737         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9738                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9739         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9740                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9741         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9742                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9743         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9744                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9745 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9746                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9747         { STATS_OFFSET32(pause_frames_sent_hi),
9748                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9749 };
9750
9751 #define IS_PORT_STAT(i) \
9752         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9753 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9754 #define IS_E1HMF_MODE_STAT(bp) \
9755                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9756
9757 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9758 {
9759         struct bnx2x *bp = netdev_priv(dev);
9760         int i, j, k;
9761
9762         switch (stringset) {
9763         case ETH_SS_STATS:
9764                 if (is_multi(bp)) {
9765                         k = 0;
9766                         for_each_queue(bp, i) {
9767                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9768                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9769                                                 bnx2x_q_stats_arr[j].string, i);
9770                                 k += BNX2X_NUM_Q_STATS;
9771                         }
9772                         if (IS_E1HMF_MODE_STAT(bp))
9773                                 break;
9774                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9775                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9776                                        bnx2x_stats_arr[j].string);
9777                 } else {
9778                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9779                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9780                                         continue;
9781                                 strcpy(buf + j*ETH_GSTRING_LEN,
9782                                        bnx2x_stats_arr[i].string);
9783                                 j++;
9784                         }
9785                 }
9786                 break;
9787
9788         case ETH_SS_TEST:
9789                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9790                 break;
9791         }
9792 }
9793
9794 static int bnx2x_get_stats_count(struct net_device *dev)
9795 {
9796         struct bnx2x *bp = netdev_priv(dev);
9797         int i, num_stats;
9798
9799         if (is_multi(bp)) {
9800                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9801                 if (!IS_E1HMF_MODE_STAT(bp))
9802                         num_stats += BNX2X_NUM_STATS;
9803         } else {
9804                 if (IS_E1HMF_MODE_STAT(bp)) {
9805                         num_stats = 0;
9806                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9807                                 if (IS_FUNC_STAT(i))
9808                                         num_stats++;
9809                 } else
9810                         num_stats = BNX2X_NUM_STATS;
9811         }
9812
9813         return num_stats;
9814 }
9815
9816 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9817                                     struct ethtool_stats *stats, u64 *buf)
9818 {
9819         struct bnx2x *bp = netdev_priv(dev);
9820         u32 *hw_stats, *offset;
9821         int i, j, k;
9822
9823         if (is_multi(bp)) {
9824                 k = 0;
9825                 for_each_queue(bp, i) {
9826                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9827                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9828                                 if (bnx2x_q_stats_arr[j].size == 0) {
9829                                         /* skip this counter */
9830                                         buf[k + j] = 0;
9831                                         continue;
9832                                 }
9833                                 offset = (hw_stats +
9834                                           bnx2x_q_stats_arr[j].offset);
9835                                 if (bnx2x_q_stats_arr[j].size == 4) {
9836                                         /* 4-byte counter */
9837                                         buf[k + j] = (u64) *offset;
9838                                         continue;
9839                                 }
9840                                 /* 8-byte counter */
9841                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9842                         }
9843                         k += BNX2X_NUM_Q_STATS;
9844                 }
9845                 if (IS_E1HMF_MODE_STAT(bp))
9846                         return;
9847                 hw_stats = (u32 *)&bp->eth_stats;
9848                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9849                         if (bnx2x_stats_arr[j].size == 0) {
9850                                 /* skip this counter */
9851                                 buf[k + j] = 0;
9852                                 continue;
9853                         }
9854                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9855                         if (bnx2x_stats_arr[j].size == 4) {
9856                                 /* 4-byte counter */
9857                                 buf[k + j] = (u64) *offset;
9858                                 continue;
9859                         }
9860                         /* 8-byte counter */
9861                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9862                 }
9863         } else {
9864                 hw_stats = (u32 *)&bp->eth_stats;
9865                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9866                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9867                                 continue;
9868                         if (bnx2x_stats_arr[i].size == 0) {
9869                                 /* skip this counter */
9870                                 buf[j] = 0;
9871                                 j++;
9872                                 continue;
9873                         }
9874                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9875                         if (bnx2x_stats_arr[i].size == 4) {
9876                                 /* 4-byte counter */
9877                                 buf[j] = (u64) *offset;
9878                                 j++;
9879                                 continue;
9880                         }
9881                         /* 8-byte counter */
9882                         buf[j] = HILO_U64(*offset, *(offset + 1));
9883                         j++;
9884                 }
9885         }
9886 }
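
/*
 * The 8-byte counters gathered above are laid out as two consecutive
 * u32s in {hi, lo} order, which is why they are assembled with
 * HILO_U64(*offset, *(offset + 1)) instead of a single 64-bit load.
 * HILO_U64 is equivalent to:
 *
 *	(((u64)(hi) << 32) + (lo))
 */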
9887
9888 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9889 {
9890         struct bnx2x *bp = netdev_priv(dev);
9891         int port = BP_PORT(bp);
9892         int i;
9893
9894         if (!netif_running(dev))
9895                 return 0;
9896
9897         if (!bp->port.pmf)
9898                 return 0;
9899
9900         if (data == 0)
9901                 data = 2;
9902
9903         for (i = 0; i < (data * 2); i++) {
9904                 if ((i % 2) == 0)
9905                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9906                                       bp->link_params.hw_led_mode,
9907                                       bp->link_params.chip_id);
9908                 else
9909                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9910                                       bp->link_params.hw_led_mode,
9911                                       bp->link_params.chip_id);
9912
9913                 msleep_interruptible(500);
9914                 if (signal_pending(current))
9915                         break;
9916         }
9917
9918         if (bp->link_vars.link_up)
9919                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9920                               bp->link_vars.line_speed,
9921                               bp->link_params.hw_led_mode,
9922                               bp->link_params.chip_id);
9923
9924         return 0;
9925 }
9926
9927 static struct ethtool_ops bnx2x_ethtool_ops = {
9928         .get_settings           = bnx2x_get_settings,
9929         .set_settings           = bnx2x_set_settings,
9930         .get_drvinfo            = bnx2x_get_drvinfo,
9931         .get_wol                = bnx2x_get_wol,
9932         .set_wol                = bnx2x_set_wol,
9933         .get_msglevel           = bnx2x_get_msglevel,
9934         .set_msglevel           = bnx2x_set_msglevel,
9935         .nway_reset             = bnx2x_nway_reset,
9936         .get_link               = ethtool_op_get_link,
9937         .get_eeprom_len         = bnx2x_get_eeprom_len,
9938         .get_eeprom             = bnx2x_get_eeprom,
9939         .set_eeprom             = bnx2x_set_eeprom,
9940         .get_coalesce           = bnx2x_get_coalesce,
9941         .set_coalesce           = bnx2x_set_coalesce,
9942         .get_ringparam          = bnx2x_get_ringparam,
9943         .set_ringparam          = bnx2x_set_ringparam,
9944         .get_pauseparam         = bnx2x_get_pauseparam,
9945         .set_pauseparam         = bnx2x_set_pauseparam,
9946         .get_rx_csum            = bnx2x_get_rx_csum,
9947         .set_rx_csum            = bnx2x_set_rx_csum,
9948         .get_tx_csum            = ethtool_op_get_tx_csum,
9949         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9950         .set_flags              = bnx2x_set_flags,
9951         .get_flags              = ethtool_op_get_flags,
9952         .get_sg                 = ethtool_op_get_sg,
9953         .set_sg                 = ethtool_op_set_sg,
9954         .get_tso                = ethtool_op_get_tso,
9955         .set_tso                = bnx2x_set_tso,
9956         .self_test_count        = bnx2x_self_test_count,
9957         .self_test              = bnx2x_self_test,
9958         .get_strings            = bnx2x_get_strings,
9959         .phys_id                = bnx2x_phys_id,
9960         .get_stats_count        = bnx2x_get_stats_count,
9961         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9962 };
9963
9964 /* end of ethtool_ops */
9965
9966 /****************************************************************************
9967 * General service functions
9968 ****************************************************************************/
9969
9970 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9971 {
9972         u16 pmcsr;
9973
9974         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9975
9976         switch (state) {
9977         case PCI_D0:
9978                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9979                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9980                                        PCI_PM_CTRL_PME_STATUS));
9981
9982                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9983                         /* delay required during transition out of D3hot */
9984                         msleep(20);
9985                 break;
9986
9987         case PCI_D3hot:
9988                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9989                 pmcsr |= 3;     /* PCI_PM_CTRL state value for D3hot */
9990
9991                 if (bp->wol)
9992                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9993
9994                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9995                                       pmcsr);
9996
9997                 /* No more memory access after this point until
9998                  * device is brought back to D0.
9999                  */
10000                 break;
10001
10002         default:
10003                 return -EINVAL;
10004         }
10005         return 0;
10006 }
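
/*
 * Two details of bnx2x_set_power_state() worth noting: the PCI PM
 * spec allows a function up to 10 ms to recover from D3hot, so the
 * msleep(20) above errs on the safe side; and PCI_PM_CTRL_PME_STATUS
 * is a write-one-to-clear bit, so writing it on the way to D0 clears
 * any pending PME event.
 */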
10007
10008 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10009 {
10010         u16 rx_cons_sb;
10011
10012         /* Tell compiler that status block fields can change */
10013         barrier();
10014         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10015         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10016                 rx_cons_sb++;
10017         return (fp->rx_comp_cons != rx_cons_sb);
10018 }
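
/*
 * Note on the MAX_RCQ_DESC_CNT adjustment above: the last descriptor
 * of each RCQ page is used as a next-page pointer rather than a real
 * completion, so the driver-side consumer index skips it.  When the
 * status block index lands on such an entry it is bumped by one so
 * that the comparison with fp->rx_comp_cons stays aligned.
 */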
10019
10020 /*
10021  * net_device service functions
10022  */
10023
10024 static int bnx2x_poll(struct napi_struct *napi, int budget)
10025 {
10026         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10027                                                  napi);
10028         struct bnx2x *bp = fp->bp;
10029         int work_done = 0;
10030
10031 #ifdef BNX2X_STOP_ON_ERROR
10032         if (unlikely(bp->panic))
10033                 goto poll_panic;
10034 #endif
10035
10036         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10037         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10038         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10039
10040         bnx2x_update_fpsb_idx(fp);
10041
10042         if (bnx2x_has_tx_work(fp))
10043                 bnx2x_tx_int(fp);
10044
10045         if (bnx2x_has_rx_work(fp)) {
10046                 work_done = bnx2x_rx_int(fp, budget);
10047
10048                 /* must not complete if we consumed full budget */
10049                 if (work_done >= budget)
10050                         goto poll_again;
10051         }
10052
10053         /* BNX2X_HAS_WORK() reads the status block, so we must ensure
10054          * that the status block indices have actually been read
10055          * (bnx2x_update_fpsb_idx) before this check (BNX2X_HAS_WORK);
10056          * otherwise we might ack a "newer" status block index to the
10057          * IGU than the work we actually handled (without the rmb, the
10058          * reads in bnx2x_update_fpsb_idx could be postponed to just
10059          * before bnx2x_ack_sb, past a DMA that follows BNX2X_HAS_WORK).
10060          * In that case no further interrupt would arrive until the next
10061          * status block update, even though unhandled work remains.
10062          */
10063         rmb();
10064
10065         if (!BNX2X_HAS_WORK(fp)) {
10066 #ifdef BNX2X_STOP_ON_ERROR
10067 poll_panic:
10068 #endif
10069                 napi_complete(napi);
10070
10071                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10072                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10073                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10074                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10075         }
10076
10077 poll_again:
10078         return work_done;
10079 }
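
/*
 * bnx2x_poll() follows the standard NAPI contract: perform at most
 * "budget" units of RX work, and call napi_complete() and re-enable
 * the device's interrupts only once no work remains.  Stripped to a
 * generic skeleton (a sketch - do_rx_work() and enable_irqs() are
 * hypothetical placeholders, not driver functions):
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = do_rx_work(napi, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			enable_irqs(napi);
 *		}
 *		return work_done;
 *	}
 */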
10080
10081
10082 /* We split the first BD into a header BD and a data BD
10083  * to ease the pain of our fellow microcode engineers;
10084  * we use one DMA mapping for both BDs.
10085  * So far this has only been observed to happen
10086  * in Other Operating Systems(TM).
10087  */
10088 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10089                                    struct bnx2x_fastpath *fp,
10090                                    struct eth_tx_bd **tx_bd, u16 hlen,
10091                                    u16 bd_prod, int nbd)
10092 {
10093         struct eth_tx_bd *h_tx_bd = *tx_bd;
10094         struct eth_tx_bd *d_tx_bd;
10095         dma_addr_t mapping;
10096         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10097
10098         /* first fix first BD */
10099         h_tx_bd->nbd = cpu_to_le16(nbd);
10100         h_tx_bd->nbytes = cpu_to_le16(hlen);
10101
10102         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10103            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10104            h_tx_bd->addr_lo, h_tx_bd->nbd);
10105
10106         /* now get a new data BD
10107          * (after the pbd) and fill it */
10108         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10109         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10110
10111         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10112                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10113
10114         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10115         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10116         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10117         d_tx_bd->vlan = 0;
10118         /* this marks the BD as one that has no individual mapping
10119          * the FW ignores this flag in a BD not marked start
10120          */
10121         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10122         DP(NETIF_MSG_TX_QUEUED,
10123            "TSO split data size is %d (%x:%x)\n",
10124            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10125
10126         /* update tx_bd for marking the last BD flag */
10127         *tx_bd = d_tx_bd;
10128
10129         return bd_prod;
10130 }
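
/*
 * Worked example for bnx2x_tx_split(): with a 1514-byte linear head
 * and hlen == 54 (ETH + IP + TCP), the header BD is trimmed to 54
 * bytes and the new data BD gets old_len - hlen == 1460 bytes at DMA
 * address (header mapping + hlen) - which is why the single
 * pci_map_single() covers both BDs.
 */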
10131
10132 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10133 {
10134         if (fix > 0)
10135                 csum = (u16) ~csum_fold(csum_sub(csum,
10136                                 csum_partial(t_header - fix, fix, 0)));
10137
10138         else if (fix < 0)
10139                 csum = (u16) ~csum_fold(csum_add(csum,
10140                                 csum_partial(t_header, -fix, 0)));
10141
10142         return swab16(csum);
10143 }
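
/*
 * bnx2x_csum_fix() compensates for the gap between the point where
 * the stack started its partial checksum and the transport header
 * the HW expects it to start from: a positive fix subtracts the
 * extra leading bytes from the running sum, a negative fix adds the
 * missing ones back, using the kernel's one's-complement helpers,
 * and the result is byte-swapped for the BD.  For example, with
 * fix == 2 the two bytes at (t_header - 2) are folded out via
 * csum_partial().
 */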
10144
10145 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10146 {
10147         u32 rc;
10148
10149         if (skb->ip_summed != CHECKSUM_PARTIAL)
10150                 rc = XMIT_PLAIN;
10151
10152         else {
10153                 if (skb->protocol == htons(ETH_P_IPV6)) {
10154                         rc = XMIT_CSUM_V6;
10155                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10156                                 rc |= XMIT_CSUM_TCP;
10157
10158                 } else {
10159                         rc = XMIT_CSUM_V4;
10160                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10161                                 rc |= XMIT_CSUM_TCP;
10162                 }
10163         }
10164
10165         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10166                 rc |= XMIT_GSO_V4;
10167
10168         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10169                 rc |= XMIT_GSO_V6;
10170
10171         return rc;
10172 }
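
/*
 * The xmit_type returned above is a bitmask - XMIT_PLAIN for no
 * checksum offload, XMIT_CSUM_V4/XMIT_CSUM_V6 (plus XMIT_CSUM_TCP
 * for TCP) for checksum offload, and XMIT_GSO_V4/XMIT_GSO_V6 for
 * TSO - and is consumed by bnx2x_start_xmit() below, e.g. to decide
 * whether a parsing BD is needed.
 */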
10173
10174 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10175 /* check if the packet requires linearization (i.e. it is too
10176    fragmented); no need to check fragmentation if the page size > 8K
10177    (there will be no violation of the FW restrictions) */
10178 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10179                              u32 xmit_type)
10180 {
10181         int to_copy = 0;
10182         int hlen = 0;
10183         int first_bd_sz = 0;
10184
10185         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10186         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10187
10188                 if (xmit_type & XMIT_GSO) {
10189                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10190                         /* Check if LSO packet needs to be copied:
10191                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10192                         int wnd_size = MAX_FETCH_BD - 3;
10193                         /* Number of windows to check */
10194                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10195                         int wnd_idx = 0;
10196                         int frag_idx = 0;
10197                         u32 wnd_sum = 0;
10198
10199                         /* Headers length */
10200                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10201                                 tcp_hdrlen(skb);
10202
10203                         /* Amount of data (w/o headers) on linear part of SKB*/
10204                         first_bd_sz = skb_headlen(skb) - hlen;
10205
10206                         wnd_sum  = first_bd_sz;
10207
10208                         /* Calculate the first sum - it's special */
10209                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10210                                 wnd_sum +=
10211                                         skb_shinfo(skb)->frags[frag_idx].size;
10212
10213                         /* If there was data on linear skb data - check it */
10214                         if (first_bd_sz > 0) {
10215                                 if (unlikely(wnd_sum < lso_mss)) {
10216                                         to_copy = 1;
10217                                         goto exit_lbl;
10218                                 }
10219
10220                                 wnd_sum -= first_bd_sz;
10221                         }
10222
10223                         /* Others are easier: run through the frag list and
10224                            check all windows */
10225                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10226                                 wnd_sum +=
10227                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10228
10229                                 if (unlikely(wnd_sum < lso_mss)) {
10230                                         to_copy = 1;
10231                                         break;
10232                                 }
10233                                 wnd_sum -=
10234                                         skb_shinfo(skb)->frags[wnd_idx].size;
10235                         }
10236                 } else {
10237                         /* a non-LSO packet that is too fragmented
10238                            must always be linearized */
10239                         to_copy = 1;
10240                 }
10241         }
10242
10243 exit_lbl:
10244         if (unlikely(to_copy))
10245                 DP(NETIF_MSG_TX_QUEUED,
10246                    "Linearization IS REQUIRED for %s packet. "
10247                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10248                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10249                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10250
10251         return to_copy;
10252 }
10253 #endif
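
/*
 * Intuition for the window check in bnx2x_pkt_req_lin(): the FW can
 * fetch at most MAX_FETCH_BD BDs per packet, so every window of
 * wnd_size consecutive BDs must carry at least one full MSS of
 * payload.  For example (hypothetical numbers), with wnd_size == 10
 * and lso_mss == 1460, ten consecutive 100-byte frags give
 * wnd_sum == 1000 < 1460, so the skb must be linearized before
 * transmit.
 */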
10254
10255 /* called with netif_tx_lock
10256  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10257  * netif_wake_queue()
10258  */
10259 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10260 {
10261         struct bnx2x *bp = netdev_priv(dev);
10262         struct bnx2x_fastpath *fp;
10263         struct netdev_queue *txq;
10264         struct sw_tx_bd *tx_buf;
10265         struct eth_tx_bd *tx_bd;
10266         struct eth_tx_parse_bd *pbd = NULL;
10267         u16 pkt_prod, bd_prod;
10268         int nbd, fp_index;
10269         dma_addr_t mapping;
10270         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10271         int vlan_off = (bp->e1hov ? 4 : 0);
10272         int i;
10273         u8 hlen = 0;
10274
10275 #ifdef BNX2X_STOP_ON_ERROR
10276         if (unlikely(bp->panic))
10277                 return NETDEV_TX_BUSY;
10278 #endif
10279
10280         fp_index = skb_get_queue_mapping(skb);
10281         txq = netdev_get_tx_queue(dev, fp_index);
10282
10283         fp = &bp->fp[fp_index];
10284
10285         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10286                 fp->eth_q_stats.driver_xoff++;
10287                 netif_tx_stop_queue(txq);
10288                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10289                 return NETDEV_TX_BUSY;
10290         }
10291
10292         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10293            "  gso type %x  xmit_type %x\n",
10294            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10295            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10296
10297 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10298         /* First, check if we need to linearize the skb (due to FW
10299            restrictions); no need to check fragmentation if the page size
10300            > 8K (there will be no violation of the FW restrictions) */
10301         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10302                 /* Statistics of linearization */
10303                 bp->lin_cnt++;
10304                 if (skb_linearize(skb) != 0) {
10305                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10306                            "silently dropping this SKB\n");
10307                         dev_kfree_skb_any(skb);
10308                         return NETDEV_TX_OK;
10309                 }
10310         }
10311 #endif
10312
10313         /*
10314          * Please read carefully.  First we use one BD which we mark as
10315          * start, then for TSO or xsum we have a parsing info BD,
10316          * and only then we have the rest of the TSO BDs.
10317          * (Don't forget to mark the last one as last,
10318          * and to unmap only AFTER you write to the BD ...)
10319          * And above all: all pbd sizes are in words - NOT DWORDS!
10320          */
10321
10322         pkt_prod = fp->tx_pkt_prod++;
10323         bd_prod = TX_BD(fp->tx_bd_prod);
10324
10325         /* get a tx_buf and first BD */
10326         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10327         tx_bd = &fp->tx_desc_ring[bd_prod];
10328
10329         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10330         tx_bd->general_data = (UNICAST_ADDRESS <<
10331                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10332         /* header nbd */
10333         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10334
10335         /* remember the first BD of the packet */
10336         tx_buf->first_bd = fp->tx_bd_prod;
10337         tx_buf->skb = skb;
10338
10339         DP(NETIF_MSG_TX_QUEUED,
10340            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10341            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10342
10343 #ifdef BCM_VLAN
10344         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10345             (bp->flags & HW_VLAN_TX_FLAG)) {
10346                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10347                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10348                 vlan_off += 4;
10349         } else
10350 #endif
10351                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10352
10353         if (xmit_type) {
10354                 /* turn on parsing and get a BD */
10355                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10356                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10357
10358                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10359         }
10360
10361         if (xmit_type & XMIT_CSUM) {
10362                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10363
10364                 /* for now NS flag is not used in Linux */
10365                 pbd->global_data =
10366                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10367                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10368
10369                 pbd->ip_hlen = (skb_transport_header(skb) -
10370                                 skb_network_header(skb)) / 2;
10371
10372                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10373
10374                 pbd->total_hlen = cpu_to_le16(hlen);
10375                 hlen = hlen*2 - vlan_off;
10376
10377                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10378
10379                 if (xmit_type & XMIT_CSUM_V4)
10380                         tx_bd->bd_flags.as_bitfield |=
10381                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10382                 else
10383                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10384
10385                 if (xmit_type & XMIT_CSUM_TCP) {
10386                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10387
10388                 } else {
10389                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10390
10391                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10392                         pbd->cs_offset = fix / 2;
10393
10394                         DP(NETIF_MSG_TX_QUEUED,
10395                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10396                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10397                            SKB_CS(skb));
10398
10399                         /* HW bug: fixup the CSUM */
10400                         pbd->tcp_pseudo_csum =
10401                                 bnx2x_csum_fix(skb_transport_header(skb),
10402                                                SKB_CS(skb), fix);
10403
10404                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10405                            pbd->tcp_pseudo_csum);
10406                 }
10407         }
10408
10409         mapping = pci_map_single(bp->pdev, skb->data,
10410                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10411
10412         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10413         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10414         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10415         tx_bd->nbd = cpu_to_le16(nbd);
10416         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10417
10418         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10419            "  nbytes %d  flags %x  vlan %x\n",
10420            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10421            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10422            le16_to_cpu(tx_bd->vlan));
10423
10424         if (xmit_type & XMIT_GSO) {
10425
10426                 DP(NETIF_MSG_TX_QUEUED,
10427                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10428                    skb->len, hlen, skb_headlen(skb),
10429                    skb_shinfo(skb)->gso_size);
10430
10431                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10432
10433                 if (unlikely(skb_headlen(skb) > hlen))
10434                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10435                                                  bd_prod, ++nbd);
10436
10437                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10438                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10439                 pbd->tcp_flags = pbd_tcp_flags(skb);
10440
10441                 if (xmit_type & XMIT_GSO_V4) {
10442                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10443                         pbd->tcp_pseudo_csum =
10444                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10445                                                           ip_hdr(skb)->daddr,
10446                                                           0, IPPROTO_TCP, 0));
10447
10448                 } else
10449                         pbd->tcp_pseudo_csum =
10450                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10451                                                         &ipv6_hdr(skb)->daddr,
10452                                                         0, IPPROTO_TCP, 0));
10453
10454                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10455         }
10456
10457         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10458                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10459
10460                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10461                 tx_bd = &fp->tx_desc_ring[bd_prod];
10462
10463                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10464                                        frag->size, PCI_DMA_TODEVICE);
10465
10466                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10467                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10468                 tx_bd->nbytes = cpu_to_le16(frag->size);
10469                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10470                 tx_bd->bd_flags.as_bitfield = 0;
10471
10472                 DP(NETIF_MSG_TX_QUEUED,
10473                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10474                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10475                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10476         }
10477
10478         /* now at last mark the BD as the last BD */
10479         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10480
10481         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10482            tx_bd, tx_bd->bd_flags.as_bitfield);
10483
10484         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10485
10486         /* now send a tx doorbell, counting the next-page pointer BD
10487          * if the packet contains or ends with it
10488          */
10489         if (TX_BD_POFF(bd_prod) < nbd)
10490                 nbd++;
10491
10492         if (pbd)
10493                 DP(NETIF_MSG_TX_QUEUED,
10494                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10495                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10496                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10497                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10498                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10499
10500         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10501
10502         /*
10503          * Make sure that the BD data is updated before updating the producer
10504          * since FW might read the BD right after the producer is updated.
10505          * This is only applicable for weak-ordered memory model archs such
10506          * as IA-64. The following barrier is also mandatory since the FW
10507          * assumes packets always have BDs.
10508          */
10509         wmb();
10510
10511         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10512         mb(); /* FW restriction: must not reorder writing nbd and packets */
10513         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10514         DOORBELL(bp, fp->index, 0);
10515
10516         mmiowb();
10517
10518         fp->tx_bd_prod += nbd;
10519         dev->trans_start = jiffies;
10520
10521         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10522                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10523                    if we put Tx into XOFF state. */
10524                 smp_mb();
10525                 netif_tx_stop_queue(txq);
10526                 fp->eth_q_stats.driver_xoff++;
10527                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10528                         netif_tx_wake_queue(txq);
10529         }
10530         fp->tx_pkt++;
10531
10532         return NETDEV_TX_OK;
10533 }
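
/*
 * The producer update at the end of bnx2x_start_xmit() is the
 * ordering pattern used throughout the driver (the loopback test
 * uses the same sequence):
 *
 *	wmb();				BDs visible before producers
 *	le16_add_cpu(&prods->bds_prod, nbd);
 *	mb();				FW: nbd before packets
 *	le32_add_cpu(&prods->packets_prod, 1);
 *	DOORBELL(bp, fp->index, 0);
 *	mmiowb();			order MMIO before any unlock
 */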
10534
10535 /* called with rtnl_lock */
10536 static int bnx2x_open(struct net_device *dev)
10537 {
10538         struct bnx2x *bp = netdev_priv(dev);
10539
10540         netif_carrier_off(dev);
10541
10542         bnx2x_set_power_state(bp, PCI_D0);
10543
10544         return bnx2x_nic_load(bp, LOAD_OPEN);
10545 }
10546
10547 /* called with rtnl_lock */
10548 static int bnx2x_close(struct net_device *dev)
10549 {
10550         struct bnx2x *bp = netdev_priv(dev);
10551
10552         /* Unload the driver, release IRQs */
10553         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10554         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10555                 if (!CHIP_REV_IS_SLOW(bp))
10556                         bnx2x_set_power_state(bp, PCI_D3hot);
10557
10558         return 0;
10559 }
10560
10561 /* called with netif_tx_lock from dev_mcast.c */
10562 static void bnx2x_set_rx_mode(struct net_device *dev)
10563 {
10564         struct bnx2x *bp = netdev_priv(dev);
10565         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10566         int port = BP_PORT(bp);
10567
10568         if (bp->state != BNX2X_STATE_OPEN) {
10569                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10570                 return;
10571         }
10572
10573         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10574
10575         if (dev->flags & IFF_PROMISC)
10576                 rx_mode = BNX2X_RX_MODE_PROMISC;
10577
10578         else if ((dev->flags & IFF_ALLMULTI) ||
10579                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10580                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10581
10582         else { /* some multicasts */
10583                 if (CHIP_IS_E1(bp)) {
10584                         int i, old, offset;
10585                         struct dev_mc_list *mclist;
10586                         struct mac_configuration_cmd *config =
10587                                                 bnx2x_sp(bp, mcast_config);
10588
10589                         for (i = 0, mclist = dev->mc_list;
10590                              mclist && (i < dev->mc_count);
10591                              i++, mclist = mclist->next) {
10592
10593                                 config->config_table[i].
10594                                         cam_entry.msb_mac_addr =
10595                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10596                                 config->config_table[i].
10597                                         cam_entry.middle_mac_addr =
10598                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10599                                 config->config_table[i].
10600                                         cam_entry.lsb_mac_addr =
10601                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10602                                 config->config_table[i].cam_entry.flags =
10603                                                         cpu_to_le16(port);
10604                                 config->config_table[i].
10605                                         target_table_entry.flags = 0;
10606                                 config->config_table[i].
10607                                         target_table_entry.client_id = 0;
10608                                 config->config_table[i].
10609                                         target_table_entry.vlan_id = 0;
10610
10611                                 DP(NETIF_MSG_IFUP,
10612                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10613                                    config->config_table[i].
10614                                                 cam_entry.msb_mac_addr,
10615                                    config->config_table[i].
10616                                                 cam_entry.middle_mac_addr,
10617                                    config->config_table[i].
10618                                                 cam_entry.lsb_mac_addr);
10619                         }
10620                         old = config->hdr.length;
10621                         if (old > i) {
10622                                 for (; i < old; i++) {
10623                                         if (CAM_IS_INVALID(config->
10624                                                            config_table[i])) {
10625                                                 /* already invalidated */
10626                                                 break;
10627                                         }
10628                                         /* invalidate */
10629                                         CAM_INVALIDATE(config->
10630                                                        config_table[i]);
10631                                 }
10632                         }
10633
10634                         if (CHIP_REV_IS_SLOW(bp))
10635                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10636                         else
10637                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10638
10639                         config->hdr.length = i;
10640                         config->hdr.offset = offset;
10641                         config->hdr.client_id = bp->fp->cl_id;
10642                         config->hdr.reserved1 = 0;
10643
10644                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10645                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10646                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10647                                       0);
10648                 } else { /* E1H */
10649                         /* Accept one or more multicasts */
10650                         struct dev_mc_list *mclist;
10651                         u32 mc_filter[MC_HASH_SIZE];
10652                         u32 crc, bit, regidx;
10653                         int i;
10654
10655                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10656
10657                         for (i = 0, mclist = dev->mc_list;
10658                              mclist && (i < dev->mc_count);
10659                              i++, mclist = mclist->next) {
10660
10661                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10662                                    mclist->dmi_addr);
10663
10664                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10665                                 bit = (crc >> 24) & 0xff;
10666                                 regidx = bit >> 5;
10667                                 bit &= 0x1f;
10668                                 mc_filter[regidx] |= (1 << bit);
10669                         }
10670
10671                         for (i = 0; i < MC_HASH_SIZE; i++)
10672                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10673                                        mc_filter[i]);
10674                 }
10675         }
10676
10677         bp->rx_mode = rx_mode;
10678         bnx2x_set_storm_rx_mode(bp);
10679 }
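
/*
 * [Editor's example -- not part of the original file.]  The E1H branch
 * above approximates multicast filtering with a 256-bit hash: the top
 * byte of the little-endian CRC32c of each MAC selects one bit spread
 * across MC_HASH_SIZE (8) 32-bit registers.  The same bucket computation,
 * pulled out standalone (assumes crc32c_le() from <linux/crc32c.h> and
 * ETH_ALEN from <linux/if_ether.h>, exactly as used above):
 */
static inline void mc_hash_set(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);	/* CRC32c of the 6-byte MAC */
	u32 bit = (crc >> 24) & 0xff;		/* top byte: bit index 0..255 */
	u32 regidx = bit >> 5;			/* which of the eight u32 words */

	mc_filter[regidx] |= 1 << (bit & 0x1f);	/* bit within that word */
}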
10680
10681 /* called with rtnl_lock */
10682 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10683 {
10684         struct sockaddr *addr = p;
10685         struct bnx2x *bp = netdev_priv(dev);
10686
10687         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10688                 return -EINVAL;
10689
10690         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10691         if (netif_running(dev)) {
10692                 if (CHIP_IS_E1(bp))
10693                         bnx2x_set_mac_addr_e1(bp, 1);
10694                 else
10695                         bnx2x_set_mac_addr_e1h(bp, 1);
10696         }
10697
10698         return 0;
10699 }
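
/*
 * [Editor's example -- not part of the original file.]  This handler is
 * reached through dev_set_mac_address(), e.g. from "ip link set dev eth0
 * address 02:00:00:00:00:01".  A minimal userspace sketch using the
 * classic SIOCSIFHWADDR ioctl; the interface name is illustrative, and
 * fd can be any AF_INET datagram socket:
 */
#include <net/if.h>
#include <net/if_arp.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_mac_sketch(int fd, const char *ifname,
			  const unsigned char mac[6])
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;	/* Ethernet hw address */
	memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
	return ioctl(fd, SIOCSIFHWADDR, &ifr);
}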
10700
10701 /* called with rtnl_lock */
10702 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10703 {
10704         struct mii_ioctl_data *data = if_mii(ifr);
10705         struct bnx2x *bp = netdev_priv(dev);
10706         int port = BP_PORT(bp);
10707         int err;
10708
10709         switch (cmd) {
10710         case SIOCGMIIPHY:
10711                 data->phy_id = bp->port.phy_addr;
10712
10713                 /* fallthrough */
10714
10715         case SIOCGMIIREG: {
10716                 u16 mii_regval;
10717
10718                 if (!netif_running(dev))
10719                         return -EAGAIN;
10720
10721                 mutex_lock(&bp->port.phy_mutex);
10722                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10723                                       DEFAULT_PHY_DEV_ADDR,
10724                                       (data->reg_num & 0x1f), &mii_regval);
10725                 data->val_out = mii_regval;
10726                 mutex_unlock(&bp->port.phy_mutex);
10727                 return err;
10728         }
10729
10730         case SIOCSMIIREG:
10731                 if (!capable(CAP_NET_ADMIN))
10732                         return -EPERM;
10733
10734                 if (!netif_running(dev))
10735                         return -EAGAIN;
10736
10737                 mutex_lock(&bp->port.phy_mutex);
10738                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10739                                        DEFAULT_PHY_DEV_ADDR,
10740                                        (data->reg_num & 0x1f), data->val_in);
10741                 mutex_unlock(&bp->port.phy_mutex);
10742                 return err;
10743
10744         default:
10745                 /* do nothing */
10746                 break;
10747         }
10748
10749         return -EOPNOTSUPP;
10750 }
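
/*
 * [Editor's example -- not part of the original file.]  The
 * SIOCGMIIPHY/SIOCGMIIREG handling above is the standard MII ioctl
 * interface, the same one mii-tool(8) speaks.  A minimal userspace read
 * of a (5-bit) MII register; interface name and fd as in the previous
 * sketch:
 */
#include <linux/mii.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int mii_read_sketch(int fd, const char *ifname, int reg,
			   unsigned int *val)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills in mii->phy_id */
		return -1;
	mii->reg_num = reg & 0x1f;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;
	*val = mii->val_out;
	return 0;
}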
10751
10752 /* called with rtnl_lock */
10753 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10754 {
10755         struct bnx2x *bp = netdev_priv(dev);
10756         int rc = 0;
10757
10758         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10759             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10760                 return -EINVAL;
10761
10762         /* This does not race with packet allocation
10763          * because the actual alloc size is
10764          * only updated as part of load
10765          */
10766         dev->mtu = new_mtu;
10767
10768         if (netif_running(dev)) {
10769                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10770                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10771         }
10772
10773         return rc;
10774 }
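
/*
 * [Editor's note -- not in the original source.]  Changing the MTU on a
 * running interface costs a full unload/load cycle because RX buffer
 * sizes are derived from the MTU only at load time, which is also why the
 * comment above can rule out a race with packet allocation.  Typical
 * usage from the shell (device name and size illustrative):
 *
 *	ip link set dev eth0 mtu 9000
 */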
10775
10776 static void bnx2x_tx_timeout(struct net_device *dev)
10777 {
10778         struct bnx2x *bp = netdev_priv(dev);
10779
10780 #ifdef BNX2X_STOP_ON_ERROR
10781         if (!bp->panic)
10782                 bnx2x_panic();
10783 #endif
10784         /* This allows the netif to be shut down gracefully before resetting */
10785         schedule_work(&bp->reset_task);
10786 }
10787
10788 #ifdef BCM_VLAN
10789 /* called with rtnl_lock */
10790 static void bnx2x_vlan_rx_register(struct net_device *dev,
10791                                    struct vlan_group *vlgrp)
10792 {
10793         struct bnx2x *bp = netdev_priv(dev);
10794
10795         bp->vlgrp = vlgrp;
10796
10797         /* Set flags according to the required capabilities */
10798         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10799
10800         if (dev->features & NETIF_F_HW_VLAN_TX)
10801                 bp->flags |= HW_VLAN_TX_FLAG;
10802
10803         if (dev->features & NETIF_F_HW_VLAN_RX)
10804                 bp->flags |= HW_VLAN_RX_FLAG;
10805
10806         if (netif_running(dev))
10807                 bnx2x_set_client_config(bp);
10808 }
10809
10810 #endif
10811
10812 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10813 static void poll_bnx2x(struct net_device *dev)
10814 {
10815         struct bnx2x *bp = netdev_priv(dev);
10816
10817         disable_irq(bp->pdev->irq);
10818         bnx2x_interrupt(bp->pdev->irq, dev);
10819         enable_irq(bp->pdev->irq);
10820 }
10821 #endif
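
/*
 * [Editor's note -- not in the original source.]  poll_bnx2x() is the
 * netpoll hook (CONFIG_NET_POLL_CONTROLLER), used by facilities such as
 * netconsole to drive the NIC when interrupts cannot be serviced.
 * Masking the IRQ line around a direct call into the interrupt handler is
 * the conventional pattern for the single-vector (INTx/MSI) case.
 * Illustrative netconsole usage (addresses are placeholders):
 *
 *	modprobe netconsole \
 *	    netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 */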
10822
10823 static const struct net_device_ops bnx2x_netdev_ops = {
10824         .ndo_open               = bnx2x_open,
10825         .ndo_stop               = bnx2x_close,
10826         .ndo_start_xmit         = bnx2x_start_xmit,
10827         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10828         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10829         .ndo_validate_addr      = eth_validate_addr,
10830         .ndo_do_ioctl           = bnx2x_ioctl,
10831         .ndo_change_mtu         = bnx2x_change_mtu,
10832         .ndo_tx_timeout         = bnx2x_tx_timeout,
10833 #ifdef BCM_VLAN
10834         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10835 #endif
10836 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10837         .ndo_poll_controller    = poll_bnx2x,
10838 #endif
10839 };
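
/*
 * [Editor's note -- not in the original source.]  struct net_device_ops
 * (introduced in 2.6.29) gathers what used to be individual function
 * pointers on struct net_device (dev->open, dev->hard_start_xmit, ...)
 * into a single const ops table shared by every instance the driver
 * creates; bnx2x_init_dev() below plugs it in via dev->netdev_ops.
 */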
10840
10841 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10842                                     struct net_device *dev)
10843 {
10844         struct bnx2x *bp;
10845         int rc;
10846
10847         SET_NETDEV_DEV(dev, &pdev->dev);
10848         bp = netdev_priv(dev);
10849
10850         bp->dev = dev;
10851         bp->pdev = pdev;
10852         bp->flags = 0;
10853         bp->func = PCI_FUNC(pdev->devfn);
10854
10855         rc = pci_enable_device(pdev);
10856         if (rc) {
10857                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10858                 goto err_out;
10859         }
10860
10861         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10862                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10863                        " aborting\n");
10864                 rc = -ENODEV;
10865                 goto err_out_disable;
10866         }
10867
10868         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10869                 printk(KERN_ERR PFX "Cannot find second PCI device"
10870                        " base address, aborting\n");
10871                 rc = -ENODEV;
10872                 goto err_out_disable;
10873         }
10874
10875         if (atomic_read(&pdev->enable_cnt) == 1) {
10876                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10877                 if (rc) {
10878                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10879                                " aborting\n");
10880                         goto err_out_disable;
10881                 }
10882
10883                 pci_set_master(pdev);
10884                 pci_save_state(pdev);
10885         }
10886
10887         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10888         if (bp->pm_cap == 0) {
10889                 printk(KERN_ERR PFX "Cannot find power management"
10890                        " capability, aborting\n");
10891                 rc = -EIO;
10892                 goto err_out_release;
10893         }
10894
10895         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10896         if (bp->pcie_cap == 0) {
10897                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10898                        " aborting\n");
10899                 rc = -EIO;
10900                 goto err_out_release;
10901         }
10902
10903         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10904                 bp->flags |= USING_DAC_FLAG;
10905                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10906                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10907                                " failed, aborting\n");
10908                         rc = -EIO;
10909                         goto err_out_release;
10910                 }
10911
10912         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10913                 printk(KERN_ERR PFX "System does not support DMA,"
10914                        " aborting\n");
10915                 rc = -EIO;
10916                 goto err_out_release;
10917         }
10918
10919         dev->mem_start = pci_resource_start(pdev, 0);
10920         dev->base_addr = dev->mem_start;
10921         dev->mem_end = pci_resource_end(pdev, 0);
10922
10923         dev->irq = pdev->irq;
10924
10925         bp->regview = pci_ioremap_bar(pdev, 0);
10926         if (!bp->regview) {
10927                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10928                 rc = -ENOMEM;
10929                 goto err_out_release;
10930         }
10931
10932         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10933                                         min_t(u64, BNX2X_DB_SIZE,
10934                                               pci_resource_len(pdev, 2)));
10935         if (!bp->doorbells) {
10936                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10937                 rc = -ENOMEM;
10938                 goto err_out_unmap;
10939         }
10940
10941         bnx2x_set_power_state(bp, PCI_D0);
10942
10943         /* clean indirect addresses */
10944         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10945                                PCICFG_VENDOR_ID_OFFSET);
10946         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10947         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10948         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10949         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10950
10951         dev->watchdog_timeo = TX_TIMEOUT;
10952
10953         dev->netdev_ops = &bnx2x_netdev_ops;
10954         dev->ethtool_ops = &bnx2x_ethtool_ops;
10955         dev->features |= NETIF_F_SG;
10956         dev->features |= NETIF_F_HW_CSUM;
10957         if (bp->flags & USING_DAC_FLAG)
10958                 dev->features |= NETIF_F_HIGHDMA;
10959 #ifdef BCM_VLAN
10960         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10961         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10962 #endif
10963         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10964         dev->features |= NETIF_F_TSO6;
10965
10966         return 0;
10967
10968 err_out_unmap:
10969         if (bp->regview) {
10970                 iounmap(bp->regview);
10971                 bp->regview = NULL;
10972         }
10973         if (bp->doorbells) {
10974                 iounmap(bp->doorbells);
10975                 bp->doorbells = NULL;
10976         }
10977
10978 err_out_release:
10979         if (atomic_read(&pdev->enable_cnt) == 1)
10980                 pci_release_regions(pdev);
10981
10982 err_out_disable:
10983         pci_disable_device(pdev);
10984         pci_set_drvdata(pdev, NULL);
10985
10986 err_out:
10987         return rc;
10988 }
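
/*
 * [Editor's example -- not part of the original file.]  bnx2x_init_dev()
 * above uses the kernel's standard goto-unwind idiom: each label releases
 * one acquisition plus everything taken before it, so every failure path
 * frees exactly what it owns.  The shape, reduced to a self-contained
 * sketch (the acquire/release stubs stand in for pci_enable_device(),
 * pci_request_regions(), ioremap() and their counterparts):
 */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int init_dev_shape(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto err_out;
	rc = acquire_b();
	if (rc)
		goto err_release_a;
	rc = acquire_c();
	if (rc)
		goto err_release_b;
	return 0;		/* success: everything stays acquired */

err_release_b:
	release_b();
err_release_a:
	release_a();
err_out:
	return rc;
}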
10989
10990 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10991 {
10992         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10993
10994         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10995         return val;
10996 }
10997
10998 /* return value of 1=2.5GHz 2=5GHz */
10999 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11000 {
11001         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11002
11003         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11004         return val;
11005 }
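
/*
 * [Editor's example -- not part of the original file.]  The two helpers
 * above read the negotiated width and speed through the chip's PCICFG
 * window.  The same fields are architecturally defined in the PCI Express
 * Link Status register, so an equivalent generic sketch for this kernel
 * era (bp->pcie_cap was located by pci_find_capability() in
 * bnx2x_init_dev() above; needs <linux/pci.h>):
 */
static int pcie_link_width_sketch(struct pci_dev *pdev, int pcie_cap)
{
	u16 lnksta;

	pci_read_config_word(pdev, pcie_cap + PCI_EXP_LNKSTA, &lnksta);
	/* bits 3:0 = link speed (1 => 2.5GT/s, 2 => 5GT/s), bits 9:4 = width */
	return (lnksta >> 4) & 0x3f;
}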
11006
11007 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11008                                     const struct pci_device_id *ent)
11009 {
11010         static int version_printed;
11011         struct net_device *dev = NULL;
11012         struct bnx2x *bp;
11013         int rc;
11014
11015         if (version_printed++ == 0)
11016                 printk(KERN_INFO "%s", version);
11017
11018         /* dev zeroed in alloc_etherdev_mq() */
11019         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11020         if (!dev) {
11021                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11022                 return -ENOMEM;
11023         }
11024
11025         bp = netdev_priv(dev);
11026         bp->msglevel = debug;
11027
11028         rc = bnx2x_init_dev(pdev, dev);
11029         if (rc < 0) {
11030                 free_netdev(dev);
11031                 return rc;
11032         }
11033
11034         pci_set_drvdata(pdev, dev);
11035
11036         rc = bnx2x_init_bp(bp);
11037         if (rc)
11038                 goto init_one_exit;
11039
11040         rc = register_netdev(dev);
11041         if (rc) {
11042                 dev_err(&pdev->dev, "Cannot register net device\n");
11043                 goto init_one_exit;
11044         }
11045
11046         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11047                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11048                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11049                bnx2x_get_pcie_width(bp),
11050                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11051                dev->base_addr, bp->pdev->irq);
11052         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11053         return 0;
11054
11055 init_one_exit:
11056         if (bp->regview)
11057                 iounmap(bp->regview);
11058
11059         if (bp->doorbells)
11060                 iounmap(bp->doorbells);
11061
11062         free_netdev(dev);
11063
11064         if (atomic_read(&pdev->enable_cnt) == 1)
11065                 pci_release_regions(pdev);
11066
11067         pci_disable_device(pdev);
11068         pci_set_drvdata(pdev, NULL);
11069
11070         return rc;
11071 }
11072
11073 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11074 {
11075         struct net_device *dev = pci_get_drvdata(pdev);
11076         struct bnx2x *bp;
11077
11078         if (!dev) {
11079                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11080                 return;
11081         }
11082         bp = netdev_priv(dev);
11083
11084         unregister_netdev(dev);
11085
11086         if (bp->regview)
11087                 iounmap(bp->regview);
11088
11089         if (bp->doorbells)
11090                 iounmap(bp->doorbells);
11091
11092         free_netdev(dev);
11093
11094         if (atomic_read(&pdev->enable_cnt) == 1)
11095                 pci_release_regions(pdev);
11096
11097         pci_disable_device(pdev);
11098         pci_set_drvdata(pdev, NULL);
11099 }
11100
11101 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11102 {
11103         struct net_device *dev = pci_get_drvdata(pdev);
11104         struct bnx2x *bp;
11105
11106         if (!dev) {
11107                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11108                 return -ENODEV;
11109         }
11110         bp = netdev_priv(dev);
11111
11112         rtnl_lock();
11113
11114         pci_save_state(pdev);
11115
11116         if (!netif_running(dev)) {
11117                 rtnl_unlock();
11118                 return 0;
11119         }
11120
11121         netif_device_detach(dev);
11122
11123         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11124
11125         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11126
11127         rtnl_unlock();
11128
11129         return 0;
11130 }
11131
11132 static int bnx2x_resume(struct pci_dev *pdev)
11133 {
11134         struct net_device *dev = pci_get_drvdata(pdev);
11135         struct bnx2x *bp;
11136         int rc;
11137
11138         if (!dev) {
11139                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11140                 return -ENODEV;
11141         }
11142         bp = netdev_priv(dev);
11143
11144         rtnl_lock();
11145
11146         pci_restore_state(pdev);
11147
11148         if (!netif_running(dev)) {
11149                 rtnl_unlock();
11150                 return 0;
11151         }
11152
11153         bnx2x_set_power_state(bp, PCI_D0);
11154         netif_device_attach(dev);
11155
11156         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11157
11158         rtnl_unlock();
11159
11160         return rc;
11161 }
11162
11163 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11164 {
11165         int i;
11166
11167         bp->state = BNX2X_STATE_ERROR;
11168
11169         bp->rx_mode = BNX2X_RX_MODE_NONE;
11170
11171         bnx2x_netif_stop(bp, 0);
11172
11173         del_timer_sync(&bp->timer);
11174         bp->stats_state = STATS_STATE_DISABLED;
11175         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11176
11177         /* Release IRQs */
11178         bnx2x_free_irq(bp);
11179
11180         if (CHIP_IS_E1(bp)) {
11181                 struct mac_configuration_cmd *config =
11182                                                 bnx2x_sp(bp, mcast_config);
11183
11184                 for (i = 0; i < config->hdr.length; i++)
11185                         CAM_INVALIDATE(config->config_table[i]);
11186         }
11187
11188         /* Free SKBs, SGEs, TPA pool and driver internals */
11189         bnx2x_free_skbs(bp);
11190         for_each_rx_queue(bp, i)
11191                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11192         for_each_rx_queue(bp, i)
11193                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11194         bnx2x_free_mem(bp);
11195
11196         bp->state = BNX2X_STATE_CLOSED;
11197
11198         netif_carrier_off(bp->dev);
11199
11200         return 0;
11201 }
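
/*
 * [Editor's note -- not in the original source.]  Unlike the regular
 * unload path, bnx2x_eeh_nic_unload() runs while the PCI channel may be
 * dead, so it restricts itself to software teardown -- stopping NAPI and
 * timers, releasing IRQs, freeing SKBs/rings and invalidating only the
 * in-memory CAM copy -- instead of the MCP handshake and ramrods a normal
 * UNLOAD would issue to the (possibly unreachable) device.
 */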
11202
11203 static void bnx2x_eeh_recover(struct bnx2x *bp)
11204 {
11205         u32 val;
11206
11207         mutex_init(&bp->port.phy_mutex);
11208
11209         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11210         bp->link_params.shmem_base = bp->common.shmem_base;
11211         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11212
11213         if (!bp->common.shmem_base ||
11214             (bp->common.shmem_base < 0xA0000) ||
11215             (bp->common.shmem_base >= 0xC0000)) {
11216                 BNX2X_DEV_INFO("MCP not active\n");
11217                 bp->flags |= NO_MCP_FLAG;
11218                 return;
11219         }
11220
11221         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11222         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11223                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11224                 BNX2X_ERR("BAD MCP validity signature\n");
11225
11226         if (!BP_NOMCP(bp)) {
11227                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11228                               & DRV_MSG_SEQ_NUMBER_MASK);
11229                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11230         }
11231 }
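
/*
 * [Editor's note -- not in the original source.]  bnx2x_eeh_recover()
 * re-derives just enough management-CPU (MCP) state for a reload: the
 * shared-memory base, its validity signature, and the current firmware
 * sequence number.  The 0xA0000-0xC0000 window test appears to mirror the
 * probe-time check for whether the MCP is alive at all.
 */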
11232
11233 /**
11234  * bnx2x_io_error_detected - called when PCI error is detected
11235  * @pdev: Pointer to PCI device
11236  * @state: The current pci connection state
11237  *
11238  * This function is called after a PCI bus error affecting
11239  * this device has been detected.
11240  */
11241 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11242                                                 pci_channel_state_t state)
11243 {
11244         struct net_device *dev = pci_get_drvdata(pdev);
11245         struct bnx2x *bp = netdev_priv(dev);
11246
11247         rtnl_lock();
11248
11249         netif_device_detach(dev);
11250
11251         if (netif_running(dev))
11252                 bnx2x_eeh_nic_unload(bp);
11253
11254         pci_disable_device(pdev);
11255
11256         rtnl_unlock();
11257
11258         /* Request a slot reset */
11259         return PCI_ERS_RESULT_NEED_RESET;
11260 }
11261
11262 /**
11263  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11264  * @pdev: Pointer to PCI device
11265  *
11266  * Restart the card from scratch, as if from a cold boot.
11267  */
11268 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11269 {
11270         struct net_device *dev = pci_get_drvdata(pdev);
11271         struct bnx2x *bp = netdev_priv(dev);
11272
11273         rtnl_lock();
11274
11275         if (pci_enable_device(pdev)) {
11276                 dev_err(&pdev->dev,
11277                         "Cannot re-enable PCI device after reset\n");
11278                 rtnl_unlock();
11279                 return PCI_ERS_RESULT_DISCONNECT;
11280         }
11281
11282         pci_set_master(pdev);
11283         pci_restore_state(pdev);
11284
11285         if (netif_running(dev))
11286                 bnx2x_set_power_state(bp, PCI_D0);
11287
11288         rtnl_unlock();
11289
11290         return PCI_ERS_RESULT_RECOVERED;
11291 }
11292
11293 /**
11294  * bnx2x_io_resume - called when traffic can start flowing again
11295  * @pdev: Pointer to PCI device
11296  *
11297  * This callback is called when the error recovery driver tells us that
11298  * it's OK to resume normal operation.
11299  */
11300 static void bnx2x_io_resume(struct pci_dev *pdev)
11301 {
11302         struct net_device *dev = pci_get_drvdata(pdev);
11303         struct bnx2x *bp = netdev_priv(dev);
11304
11305         rtnl_lock();
11306
11307         bnx2x_eeh_recover(bp);
11308
11309         if (netif_running(dev))
11310                 bnx2x_nic_load(bp, LOAD_NORMAL);
11311
11312         netif_device_attach(dev);
11313
11314         rtnl_unlock();
11315 }
11316
11317 static struct pci_error_handlers bnx2x_err_handler = {
11318         .error_detected = bnx2x_io_error_detected,
11319         .slot_reset     = bnx2x_io_slot_reset,
11320         .resume         = bnx2x_io_resume,
11321 };
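
/*
 * [Editor's note -- not in the original source.]  The three callbacks
 * above follow the kernel's PCI error-recovery contract (see
 * Documentation/PCI/pci-error-recovery.txt): ->error_detected() quiesces
 * the driver and requests a slot reset, ->slot_reset() re-enables the
 * freshly reset device, and ->resume() reloads the NIC once the core says
 * traffic may flow again.
 */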
11322
11323 static struct pci_driver bnx2x_pci_driver = {
11324         .name        = DRV_MODULE_NAME,
11325         .id_table    = bnx2x_pci_tbl,
11326         .probe       = bnx2x_init_one,
11327         .remove      = __devexit_p(bnx2x_remove_one),
11328         .suspend     = bnx2x_suspend,
11329         .resume      = bnx2x_resume,
11330         .err_handler = &bnx2x_err_handler,
11331 };
11332
11333 static int __init bnx2x_init(void)
11334 {
11335         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11336         if (bnx2x_wq == NULL) {
11337                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11338                 return -ENOMEM;
11339         }
11340
11341         return pci_register_driver(&bnx2x_pci_driver);
11342 }
11343
11344 static void __exit bnx2x_cleanup(void)
11345 {
11346         pci_unregister_driver(&bnx2x_pci_driver);
11347
11348         destroy_workqueue(bnx2x_wq);
11349 }
11350
11351 module_init(bnx2x_init);
11352 module_exit(bnx2x_cleanup);
11353