1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.65"
72 #define DRV_MODULE_RELDATE      "August 07, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself;
107  * we really want to expose these constants to GCC so that modulo et
108  * al. operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
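/* Illustrative note (editor's sketch, not from the driver itself): because
 * TG3_TX_RING_SIZE is a power of two, the mask used by NEXT_TX() above is
 * equivalent to a modulo, e.g. for the 512-entry ring
 *
 *     (511 + 1) % 512 == 0   and   (511 + 1) & (512 - 1) == 0
 *
 * which is exactly the '% foo' -> '& (foo - 1)' rewrite mentioned in the
 * ring-size comment further up.
 */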
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 #define TG3_NUM_TEST            6
138
139 static char version[] __devinitdata =
140         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141
142 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
143 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
144 MODULE_LICENSE("GPL");
145 MODULE_VERSION(DRV_MODULE_VERSION);
146
147 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
148 module_param(tg3_debug, int, 0);
149 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
151 static struct pci_device_id tg3_pci_tbl[] = {
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
200         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
201         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
202         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
203         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
204         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
205         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
206         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
207         {}
208 };
209
210 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
211
212 static const struct {
213         const char string[ETH_GSTRING_LEN];
214 } ethtool_stats_keys[TG3_NUM_STATS] = {
215         { "rx_octets" },
216         { "rx_fragments" },
217         { "rx_ucast_packets" },
218         { "rx_mcast_packets" },
219         { "rx_bcast_packets" },
220         { "rx_fcs_errors" },
221         { "rx_align_errors" },
222         { "rx_xon_pause_rcvd" },
223         { "rx_xoff_pause_rcvd" },
224         { "rx_mac_ctrl_rcvd" },
225         { "rx_xoff_entered" },
226         { "rx_frame_too_long_errors" },
227         { "rx_jabbers" },
228         { "rx_undersize_packets" },
229         { "rx_in_length_errors" },
230         { "rx_out_length_errors" },
231         { "rx_64_or_less_octet_packets" },
232         { "rx_65_to_127_octet_packets" },
233         { "rx_128_to_255_octet_packets" },
234         { "rx_256_to_511_octet_packets" },
235         { "rx_512_to_1023_octet_packets" },
236         { "rx_1024_to_1522_octet_packets" },
237         { "rx_1523_to_2047_octet_packets" },
238         { "rx_2048_to_4095_octet_packets" },
239         { "rx_4096_to_8191_octet_packets" },
240         { "rx_8192_to_9022_octet_packets" },
241
242         { "tx_octets" },
243         { "tx_collisions" },
244
245         { "tx_xon_sent" },
246         { "tx_xoff_sent" },
247         { "tx_flow_control" },
248         { "tx_mac_errors" },
249         { "tx_single_collisions" },
250         { "tx_mult_collisions" },
251         { "tx_deferred" },
252         { "tx_excessive_collisions" },
253         { "tx_late_collisions" },
254         { "tx_collide_2times" },
255         { "tx_collide_3times" },
256         { "tx_collide_4times" },
257         { "tx_collide_5times" },
258         { "tx_collide_6times" },
259         { "tx_collide_7times" },
260         { "tx_collide_8times" },
261         { "tx_collide_9times" },
262         { "tx_collide_10times" },
263         { "tx_collide_11times" },
264         { "tx_collide_12times" },
265         { "tx_collide_13times" },
266         { "tx_collide_14times" },
267         { "tx_collide_15times" },
268         { "tx_ucast_packets" },
269         { "tx_mcast_packets" },
270         { "tx_bcast_packets" },
271         { "tx_carrier_sense_errors" },
272         { "tx_discards" },
273         { "tx_errors" },
274
275         { "dma_writeq_full" },
276         { "dma_write_prioq_full" },
277         { "rxbds_empty" },
278         { "rx_discards" },
279         { "rx_errors" },
280         { "rx_threshold_hit" },
281
282         { "dma_readq_full" },
283         { "dma_read_prioq_full" },
284         { "tx_comp_queue_full" },
285
286         { "ring_set_send_prod_index" },
287         { "ring_status_update" },
288         { "nic_irqs" },
289         { "nic_avoided_irqs" },
290         { "nic_tx_threshold_hit" }
291 };
292
293 static const struct {
294         const char string[ETH_GSTRING_LEN];
295 } ethtool_test_keys[TG3_NUM_TEST] = {
296         { "nvram test     (online) " },
297         { "link test      (online) " },
298         { "register test  (offline)" },
299         { "memory test    (offline)" },
300         { "loopback test  (offline)" },
301         { "interrupt test (offline)" },
302 };
303
304 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
305 {
306         writel(val, tp->regs + off);
307 }
308
309 static u32 tg3_read32(struct tg3 *tp, u32 off)
310 {
311         return (readl(tp->regs + off));
312 }
313
314 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
315 {
316         unsigned long flags;
317
318         spin_lock_irqsave(&tp->indirect_lock, flags);
319         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
320         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
321         spin_unlock_irqrestore(&tp->indirect_lock, flags);
322 }
323
324 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
325 {
326         writel(val, tp->regs + off);
327         readl(tp->regs + off);
328 }
329
330 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
331 {
332         unsigned long flags;
333         u32 val;
334
335         spin_lock_irqsave(&tp->indirect_lock, flags);
336         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
337         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
338         spin_unlock_irqrestore(&tp->indirect_lock, flags);
339         return val;
340 }
341
342 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
343 {
344         unsigned long flags;
345
346         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
347                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
348                                        TG3_64BIT_REG_LOW, val);
349                 return;
350         }
351         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
352                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
353                                        TG3_64BIT_REG_LOW, val);
354                 return;
355         }
356
357         spin_lock_irqsave(&tp->indirect_lock, flags);
358         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
359         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
360         spin_unlock_irqrestore(&tp->indirect_lock, flags);
361
362         /* In indirect mode when disabling interrupts, we also need
363          * to clear the interrupt bit in the GRC local ctrl register.
364          */
365         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
366             (val == 0x1)) {
367                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
368                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
369         }
370 }
371
372 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
373 {
374         unsigned long flags;
375         u32 val;
376
377         spin_lock_irqsave(&tp->indirect_lock, flags);
378         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
379         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
380         spin_unlock_irqrestore(&tp->indirect_lock, flags);
381         return val;
382 }
383
384 /* usec_wait specifies the wait time in usec when writing to certain registers
385  * where it is unsafe to read back the register without some delay.
386  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
387  * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
388  */
389 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
390 {
391         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
392             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
393                 /* Non-posted methods */
394                 tp->write32(tp, off, val);
395         else {
396                 /* Posted method */
397                 tg3_write32(tp, off, val);
398                 if (usec_wait)
399                         udelay(usec_wait);
400                 tp->read32(tp, off);
401         }
402         /* Wait again after the read for the posted method to guarantee that
403          * the wait time is met.
404          */
405         if (usec_wait)
406                 udelay(usec_wait);
407 }
408
409 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
410 {
411         tp->write32_mbox(tp, off, val);
412         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
413             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
414                 tp->read32_mbox(tp, off);
415 }
416
417 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
418 {
419         void __iomem *mbox = tp->regs + off;
420         writel(val, mbox);
421         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
422                 writel(val, mbox);
423         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
424                 readl(mbox);
425 }
426
427 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
428 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
429 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
430 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
431 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
432
433 #define tw32(reg,val)           tp->write32(tp, reg, val)
434 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
435 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
436 #define tr32(reg)               tp->read32(tp, reg)
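/* Illustrative usage sketch (editor's addition, mirroring calls made later in
 * this file): tw32() is a plain posted write, tw32_f() flushes it by reading
 * the register back, and tw32_wait_f() adds a udelay() for registers such as
 * GRC_LOCAL_CTRL or TG3PCI_CLOCK_CTRL where an immediate read-back is unsafe:
 *
 *     tw32(MAC_MODE, mac_mode);
 *     tw32_f(MAC_MODE, mac_mode);
 *     tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */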
437
438 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
439 {
440         unsigned long flags;
441
442         spin_lock_irqsave(&tp->indirect_lock, flags);
443         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
444                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
445                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
446
447                 /* Always leave this as zero. */
448                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
449         } else {
450                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
451                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
452
453                 /* Always leave this as zero. */
454                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
455         }
456         spin_unlock_irqrestore(&tp->indirect_lock, flags);
457 }
458
459 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
460 {
461         unsigned long flags;
462
463         spin_lock_irqsave(&tp->indirect_lock, flags);
464         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
465                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
466                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
467
468                 /* Always leave this as zero. */
469                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
470         } else {
471                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
472                 *val = tr32(TG3PCI_MEM_WIN_DATA);
473
474                 /* Always leave this as zero. */
475                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
476         }
477         spin_unlock_irqrestore(&tp->indirect_lock, flags);
478 }
479
480 static void tg3_disable_ints(struct tg3 *tp)
481 {
482         tw32(TG3PCI_MISC_HOST_CTRL,
483              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
484         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
485 }
486
487 static inline void tg3_cond_int(struct tg3 *tp)
488 {
489         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
490             (tp->hw_status->status & SD_STATUS_UPDATED))
491                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
492 }
493
494 static void tg3_enable_ints(struct tg3 *tp)
495 {
496         tp->irq_sync = 0;
497         wmb();
498
499         tw32(TG3PCI_MISC_HOST_CTRL,
500              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
501         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
502                        (tp->last_tag << 24));
503         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
504                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
505                                (tp->last_tag << 24));
506         tg3_cond_int(tp);
507 }
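/* Editor's note, inferred from the two helpers above: writing 0x00000001 to
 * MAILBOX_INTERRUPT_0 masks the interrupt, while writing the last status tag
 * shifted into bits 31:24 (tp->last_tag << 24) unmasks it and acknowledges
 * all work up to that tag; 1-shot MSI chips need that unmask written twice.
 */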
508
509 static inline unsigned int tg3_has_work(struct tg3 *tp)
510 {
511         struct tg3_hw_status *sblk = tp->hw_status;
512         unsigned int work_exists = 0;
513
514         /* check for phy events */
515         if (!(tp->tg3_flags &
516               (TG3_FLAG_USE_LINKCHG_REG |
517                TG3_FLAG_POLL_SERDES))) {
518                 if (sblk->status & SD_STATUS_LINK_CHG)
519                         work_exists = 1;
520         }
521         /* check for RX/TX work to do */
522         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
523             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
524                 work_exists = 1;
525
526         return work_exists;
527 }
528
529 /* tg3_restart_ints
530  *  Similar to tg3_enable_ints, but it accurately determines whether there
531  *  is new work pending and can return without flushing the PIO write
532  *  which re-enables interrupts.
533  */
534 static void tg3_restart_ints(struct tg3 *tp)
535 {
536         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
537                      tp->last_tag << 24);
538         mmiowb();
539
540         /* When doing tagged status, this work check is unnecessary.
541          * The last_tag we write above tells the chip which piece of
542          * work we've completed.
543          */
544         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
545             tg3_has_work(tp))
546                 tw32(HOSTCC_MODE, tp->coalesce_mode |
547                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
548 }
549
550 static inline void tg3_netif_stop(struct tg3 *tp)
551 {
552         tp->dev->trans_start = jiffies; /* prevent tx timeout */
553         netif_poll_disable(tp->dev);
554         netif_tx_disable(tp->dev);
555 }
556
557 static inline void tg3_netif_start(struct tg3 *tp)
558 {
559         netif_wake_queue(tp->dev);
560         /* NOTE: unconditional netif_wake_queue is only appropriate
561          * so long as all callers are assured to have free tx slots
562          * (such as after tg3_init_hw)
563          */
564         netif_poll_enable(tp->dev);
565         tp->hw_status->status |= SD_STATUS_UPDATED;
566         tg3_enable_ints(tp);
567 }
568
569 static void tg3_switch_clocks(struct tg3 *tp)
570 {
571         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
572         u32 orig_clock_ctrl;
573
574         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
575                 return;
576
577         orig_clock_ctrl = clock_ctrl;
578         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
579                        CLOCK_CTRL_CLKRUN_OENABLE |
580                        0x1f);
581         tp->pci_clock_ctrl = clock_ctrl;
582
583         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
584                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
585                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
586                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
587                 }
588         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
589                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
590                             clock_ctrl |
591                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
592                             40);
593                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
594                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
595                             40);
596         }
597         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
598 }
599
600 #define PHY_BUSY_LOOPS  5000
601
602 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
603 {
604         u32 frame_val;
605         unsigned int loops;
606         int ret;
607
608         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
609                 tw32_f(MAC_MI_MODE,
610                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
611                 udelay(80);
612         }
613
614         *val = 0x0;
615
616         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
617                       MI_COM_PHY_ADDR_MASK);
618         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
619                       MI_COM_REG_ADDR_MASK);
620         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
621
622         tw32_f(MAC_MI_COM, frame_val);
623
624         loops = PHY_BUSY_LOOPS;
625         while (loops != 0) {
626                 udelay(10);
627                 frame_val = tr32(MAC_MI_COM);
628
629                 if ((frame_val & MI_COM_BUSY) == 0) {
630                         udelay(5);
631                         frame_val = tr32(MAC_MI_COM);
632                         break;
633                 }
634                 loops -= 1;
635         }
636
637         ret = -EBUSY;
638         if (loops != 0) {
639                 *val = frame_val & MI_COM_DATA_MASK;
640                 ret = 0;
641         }
642
643         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
644                 tw32_f(MAC_MI_MODE, tp->mi_mode);
645                 udelay(80);
646         }
647
648         return ret;
649 }
650
651 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
652 {
653         u32 frame_val;
654         unsigned int loops;
655         int ret;
656
657         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
658                 tw32_f(MAC_MI_MODE,
659                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
660                 udelay(80);
661         }
662
663         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
664                       MI_COM_PHY_ADDR_MASK);
665         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
666                       MI_COM_REG_ADDR_MASK);
667         frame_val |= (val & MI_COM_DATA_MASK);
668         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
669
670         tw32_f(MAC_MI_COM, frame_val);
671
672         loops = PHY_BUSY_LOOPS;
673         while (loops != 0) {
674                 udelay(10);
675                 frame_val = tr32(MAC_MI_COM);
676                 if ((frame_val & MI_COM_BUSY) == 0) {
677                         udelay(5);
678                         frame_val = tr32(MAC_MI_COM);
679                         break;
680                 }
681                 loops -= 1;
682         }
683
684         ret = -EBUSY;
685         if (loops != 0)
686                 ret = 0;
687
688         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
689                 tw32_f(MAC_MI_MODE, tp->mi_mode);
690                 udelay(80);
691         }
692
693         return ret;
694 }
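/* Illustrative sketch (editor's addition): tg3_readphy()/tg3_writephy() build
 * an MI_COM frame from the PHY address, register number and (for writes) the
 * data, then poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.  A typical
 * caller, with handle_link_up() standing in for the caller's own logic, is:
 *
 *     u32 bmsr;
 *     if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *             handle_link_up(tp);
 */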
695
696 static void tg3_phy_set_wirespeed(struct tg3 *tp)
697 {
698         u32 val;
699
700         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
701                 return;
702
703         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
704             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
705                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
706                              (val | (1 << 15) | (1 << 4)));
707 }
708
709 static int tg3_bmcr_reset(struct tg3 *tp)
710 {
711         u32 phy_control;
712         int limit, err;
713
714         /* OK, reset it, and poll the BMCR_RESET bit until it
715          * clears or we time out.
716          */
717         phy_control = BMCR_RESET;
718         err = tg3_writephy(tp, MII_BMCR, phy_control);
719         if (err != 0)
720                 return -EBUSY;
721
722         limit = 5000;
723         while (limit--) {
724                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
725                 if (err != 0)
726                         return -EBUSY;
727
728                 if ((phy_control & BMCR_RESET) == 0) {
729                         udelay(40);
730                         break;
731                 }
732                 udelay(10);
733         }
734         if (limit <= 0)
735                 return -EBUSY;
736
737         return 0;
738 }
739
740 static int tg3_wait_macro_done(struct tg3 *tp)
741 {
742         int limit = 100;
743
744         while (limit--) {
745                 u32 tmp32;
746
747                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
748                         if ((tmp32 & 0x1000) == 0)
749                                 break;
750                 }
751         }
752         if (limit <= 0)
753                 return -EBUSY;
754
755         return 0;
756 }
757
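/* Write a known test pattern into each of the four PHY DSP channels and read
 * it back to verify it; sets *resetp when the DSP macro engine times out so
 * that the caller retries with a fresh PHY reset, and returns -EBUSY on any
 * failure.
 */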
758 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
759 {
760         static const u32 test_pat[4][6] = {
761         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
762         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
763         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
764         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
765         };
766         int chan;
767
768         for (chan = 0; chan < 4; chan++) {
769                 int i;
770
771                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
772                              (chan * 0x2000) | 0x0200);
773                 tg3_writephy(tp, 0x16, 0x0002);
774
775                 for (i = 0; i < 6; i++)
776                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
777                                      test_pat[chan][i]);
778
779                 tg3_writephy(tp, 0x16, 0x0202);
780                 if (tg3_wait_macro_done(tp)) {
781                         *resetp = 1;
782                         return -EBUSY;
783                 }
784
785                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
786                              (chan * 0x2000) | 0x0200);
787                 tg3_writephy(tp, 0x16, 0x0082);
788                 if (tg3_wait_macro_done(tp)) {
789                         *resetp = 1;
790                         return -EBUSY;
791                 }
792
793                 tg3_writephy(tp, 0x16, 0x0802);
794                 if (tg3_wait_macro_done(tp)) {
795                         *resetp = 1;
796                         return -EBUSY;
797                 }
798
799                 for (i = 0; i < 6; i += 2) {
800                         u32 low, high;
801
802                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
803                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
804                             tg3_wait_macro_done(tp)) {
805                                 *resetp = 1;
806                                 return -EBUSY;
807                         }
808                         low &= 0x7fff;
809                         high &= 0x000f;
810                         if (low != test_pat[chan][i] ||
811                             high != test_pat[chan][i+1]) {
812                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
813                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
814                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
815
816                                 return -EBUSY;
817                         }
818                 }
819         }
820
821         return 0;
822 }
823
824 static int tg3_phy_reset_chanpat(struct tg3 *tp)
825 {
826         int chan;
827
828         for (chan = 0; chan < 4; chan++) {
829                 int i;
830
831                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
832                              (chan * 0x2000) | 0x0200);
833                 tg3_writephy(tp, 0x16, 0x0002);
834                 for (i = 0; i < 6; i++)
835                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
836                 tg3_writephy(tp, 0x16, 0x0202);
837                 if (tg3_wait_macro_done(tp))
838                         return -EBUSY;
839         }
840
841         return 0;
842 }
843
844 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
845 {
846         u32 reg32, phy9_orig;
847         int retries, do_phy_reset, err;
848
849         retries = 10;
850         do_phy_reset = 1;
851         do {
852                 if (do_phy_reset) {
853                         err = tg3_bmcr_reset(tp);
854                         if (err)
855                                 return err;
856                         do_phy_reset = 0;
857                 }
858
859                 /* Disable transmitter and interrupt.  */
860                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
861                         continue;
862
863                 reg32 |= 0x3000;
864                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
865
866                 /* Set full-duplex, 1000 mbps.  */
867                 tg3_writephy(tp, MII_BMCR,
868                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
869
870                 /* Set to master mode.  */
871                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
872                         continue;
873
874                 tg3_writephy(tp, MII_TG3_CTRL,
875                              (MII_TG3_CTRL_AS_MASTER |
876                               MII_TG3_CTRL_ENABLE_AS_MASTER));
877
878                 /* Enable SM_DSP_CLOCK and 6dB.  */
879                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
880
881                 /* Block the PHY control access.  */
882                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
883                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
884
885                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
886                 if (!err)
887                         break;
888         } while (--retries);
889
890         err = tg3_phy_reset_chanpat(tp);
891         if (err)
892                 return err;
893
894         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
895         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
896
897         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
898         tg3_writephy(tp, 0x16, 0x0000);
899
900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
902                 /* Set Extended packet length bit for jumbo frames */
903                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
904         }
905         else {
906                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
907         }
908
909         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
910
911         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
912                 reg32 &= ~0x3000;
913                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
914         } else if (!err)
915                 err = -EBUSY;
916
917         return err;
918 }
919
920 static void tg3_link_report(struct tg3 *);
921
922 /* Reset the tigon3 PHY and apply any chip-specific workarounds
923  * needed afterwards (DSP fixups, wirespeed, jumbo-frame settings).
924  */
925 static int tg3_phy_reset(struct tg3 *tp)
926 {
927         u32 phy_status;
928         int err;
929
930         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
931         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
932         if (err != 0)
933                 return -EBUSY;
934
935         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
936                 netif_carrier_off(tp->dev);
937                 tg3_link_report(tp);
938         }
939
940         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
941             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
942             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
943                 err = tg3_phy_reset_5703_4_5(tp);
944                 if (err)
945                         return err;
946                 goto out;
947         }
948
949         err = tg3_bmcr_reset(tp);
950         if (err)
951                 return err;
952
953 out:
954         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
955                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
956                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
957                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
958                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
959                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
960                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
961         }
962         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
963                 tg3_writephy(tp, 0x1c, 0x8d68);
964                 tg3_writephy(tp, 0x1c, 0x8d68);
965         }
966         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
967                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
968                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
969                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
970                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
971                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
972                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
973                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
974                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
975         }
976         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
977                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
978                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
979                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
980                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
981         }
982         /* Set Extended packet length bit (bit 14) on all chips
983          * that support jumbo frames. */
984         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
985                 /* Cannot do read-modify-write on 5401 */
986                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
987         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
988                 u32 phy_reg;
989
990                 /* Set bit 14 with read-modify-write to preserve other bits */
991                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
992                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
993                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
994         }
995
996         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
997          * jumbo frame transmission.
998          */
999         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1000                 u32 phy_reg;
1001
1002                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1003                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1004                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1005         }
1006
1007         tg3_phy_set_wirespeed(tp);
1008         return 0;
1009 }
1010
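/* Drive the GRC GPIO-controlled auxiliary power so that WOL/ASF can keep the
 * NIC powered.  On dual-port 5704/5714 devices this coordinates with the peer
 * function (tp->pdev_peer) to avoid fighting over the shared GPIOs.
 */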
1011 static void tg3_frob_aux_power(struct tg3 *tp)
1012 {
1013         struct tg3 *tp_peer = tp;
1014
1015         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1016                 return;
1017
1018         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1019             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1020                 struct net_device *dev_peer;
1021
1022                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1023                 /* remove_one() may have been run on the peer. */
1024                 if (!dev_peer)
1025                         tp_peer = tp;
1026                 else
1027                         tp_peer = netdev_priv(dev_peer);
1028         }
1029
1030         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1031             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1032             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1033             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1034                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1035                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1036                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1037                                     (GRC_LCLCTRL_GPIO_OE0 |
1038                                      GRC_LCLCTRL_GPIO_OE1 |
1039                                      GRC_LCLCTRL_GPIO_OE2 |
1040                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1041                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1042                                     100);
1043                 } else {
1044                         u32 no_gpio2;
1045                         u32 grc_local_ctrl = 0;
1046
1047                         if (tp_peer != tp &&
1048                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1049                                 return;
1050
1051                         /* Workaround to prevent overdrawing Amps. */
1052                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1053                             ASIC_REV_5714) {
1054                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1055                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1056                                             grc_local_ctrl, 100);
1057                         }
1058
1059                         /* On 5753 and variants, GPIO2 cannot be used. */
1060                         no_gpio2 = tp->nic_sram_data_cfg &
1061                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1062
1063                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1064                                          GRC_LCLCTRL_GPIO_OE1 |
1065                                          GRC_LCLCTRL_GPIO_OE2 |
1066                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1067                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1068                         if (no_gpio2) {
1069                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1070                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1071                         }
1072                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1073                                                     grc_local_ctrl, 100);
1074
1075                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1076
1077                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1078                                                     grc_local_ctrl, 100);
1079
1080                         if (!no_gpio2) {
1081                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1082                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1083                                             grc_local_ctrl, 100);
1084                         }
1085                 }
1086         } else {
1087                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1088                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1089                         if (tp_peer != tp &&
1090                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1091                                 return;
1092
1093                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1094                                     (GRC_LCLCTRL_GPIO_OE1 |
1095                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1096
1097                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1098                                     GRC_LCLCTRL_GPIO_OE1, 100);
1099
1100                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1101                                     (GRC_LCLCTRL_GPIO_OE1 |
1102                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1103                 }
1104         }
1105 }
1106
1107 static int tg3_setup_phy(struct tg3 *, int);
1108
1109 #define RESET_KIND_SHUTDOWN     0
1110 #define RESET_KIND_INIT         1
1111 #define RESET_KIND_SUSPEND      2
1112
1113 static void tg3_write_sig_post_reset(struct tg3 *, int);
1114 static int tg3_halt_cpu(struct tg3 *, u32);
1115 static int tg3_nvram_lock(struct tg3 *);
1116 static void tg3_nvram_unlock(struct tg3 *);
1117
1118 static void tg3_power_down_phy(struct tg3 *tp)
1119 {
1120         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1121                 return;
1122
1123         tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1124         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1125
1126         /* The PHY should not be powered down on some chips because
1127          * of bugs.
1128          */
1129         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1131             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1132              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1133                 return;
1134         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1135 }
1136
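/* Put the device into the requested PCI power state: save the current link
 * configuration, force copper links to 10/half autoneg, program the WOL
 * mailbox, MAC mode and clock registers, power down the PHY when neither WOL
 * nor ASF needs it, and finally write PCI_PM_CTRL.
 */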
1137 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1138 {
1139         u32 misc_host_ctrl;
1140         u16 power_control, power_caps;
1141         int pm = tp->pm_cap;
1142
1143         /* Make sure register accesses (indirect or otherwise)
1144          * will function correctly.
1145          */
1146         pci_write_config_dword(tp->pdev,
1147                                TG3PCI_MISC_HOST_CTRL,
1148                                tp->misc_host_ctrl);
1149
1150         pci_read_config_word(tp->pdev,
1151                              pm + PCI_PM_CTRL,
1152                              &power_control);
1153         power_control |= PCI_PM_CTRL_PME_STATUS;
1154         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1155         switch (state) {
1156         case PCI_D0:
1157                 power_control |= 0;
1158                 pci_write_config_word(tp->pdev,
1159                                       pm + PCI_PM_CTRL,
1160                                       power_control);
1161                 udelay(100);    /* Delay after power state change */
1162
1163                 /* Switch out of Vaux if it is not a LOM */
1164                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1165                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1166
1167                 return 0;
1168
1169         case PCI_D1:
1170                 power_control |= 1;
1171                 break;
1172
1173         case PCI_D2:
1174                 power_control |= 2;
1175                 break;
1176
1177         case PCI_D3hot:
1178                 power_control |= 3;
1179                 break;
1180
1181         default:
1182                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1183                        "requested.\n",
1184                        tp->dev->name, state);
1185                 return -EINVAL;
1186         }
1187
1188         power_control |= PCI_PM_CTRL_PME_ENABLE;
1189
1190         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1191         tw32(TG3PCI_MISC_HOST_CTRL,
1192              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1193
1194         if (tp->link_config.phy_is_low_power == 0) {
1195                 tp->link_config.phy_is_low_power = 1;
1196                 tp->link_config.orig_speed = tp->link_config.speed;
1197                 tp->link_config.orig_duplex = tp->link_config.duplex;
1198                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1199         }
1200
1201         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1202                 tp->link_config.speed = SPEED_10;
1203                 tp->link_config.duplex = DUPLEX_HALF;
1204                 tp->link_config.autoneg = AUTONEG_ENABLE;
1205                 tg3_setup_phy(tp, 0);
1206         }
1207
1208         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1209                 int i;
1210                 u32 val;
1211
1212                 for (i = 0; i < 200; i++) {
1213                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1214                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1215                                 break;
1216                         msleep(1);
1217                 }
1218         }
1219         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1220                                              WOL_DRV_STATE_SHUTDOWN |
1221                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1222
1223         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1224
1225         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1226                 u32 mac_mode;
1227
1228                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1229                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1230                         udelay(40);
1231
1232                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1233                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1234                         else
1235                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1236
1237                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1238                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1239                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1240                 } else {
1241                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1242                 }
1243
1244                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1245                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1246
1247                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1248                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1249                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1250
1251                 tw32_f(MAC_MODE, mac_mode);
1252                 udelay(100);
1253
1254                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1255                 udelay(10);
1256         }
1257
1258         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1259             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1260              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1261                 u32 base_val;
1262
1263                 base_val = tp->pci_clock_ctrl;
1264                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1265                              CLOCK_CTRL_TXCLK_DISABLE);
1266
1267                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1268                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1269         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1270                 /* do nothing */
1271         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1272                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1273                 u32 newbits1, newbits2;
1274
1275                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1276                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1277                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1278                                     CLOCK_CTRL_TXCLK_DISABLE |
1279                                     CLOCK_CTRL_ALTCLK);
1280                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1281                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1282                         newbits1 = CLOCK_CTRL_625_CORE;
1283                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1284                 } else {
1285                         newbits1 = CLOCK_CTRL_ALTCLK;
1286                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1287                 }
1288
1289                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1290                             40);
1291
1292                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1293                             40);
1294
1295                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1296                         u32 newbits3;
1297
1298                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1299                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1300                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1301                                             CLOCK_CTRL_TXCLK_DISABLE |
1302                                             CLOCK_CTRL_44MHZ_CORE);
1303                         } else {
1304                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1305                         }
1306
1307                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1308                                     tp->pci_clock_ctrl | newbits3, 40);
1309                 }
1310         }
1311
1312         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1313             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1314                 tg3_power_down_phy(tp);
1315
1316         tg3_frob_aux_power(tp);
1317
1318         /* Workaround for unstable PLL clock */
1319         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1320             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1321                 u32 val = tr32(0x7d00);
1322
1323                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1324                 tw32(0x7d00, val);
1325                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1326                         int err;
1327
1328                         err = tg3_nvram_lock(tp);
1329                         tg3_halt_cpu(tp, RX_CPU_BASE);
1330                         if (!err)
1331                                 tg3_nvram_unlock(tp);
1332                 }
1333         }
1334
1335         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1336
1337         /* Finally, set the new power state. */
1338         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1339         udelay(100);    /* Delay after power state change */
1340
1341         return 0;
1342 }
1343
1344 static void tg3_link_report(struct tg3 *tp)
1345 {
1346         if (!netif_carrier_ok(tp->dev)) {
1347                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1348         } else {
1349                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1350                        tp->dev->name,
1351                        (tp->link_config.active_speed == SPEED_1000 ?
1352                         1000 :
1353                         (tp->link_config.active_speed == SPEED_100 ?
1354                          100 : 10)),
1355                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1356                         "full" : "half"));
1357
1358                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1359                        "%s for RX.\n",
1360                        tp->dev->name,
1361                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1362                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1363         }
1364 }
1365
1366 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1367 {
1368         u32 new_tg3_flags = 0;
1369         u32 old_rx_mode = tp->rx_mode;
1370         u32 old_tx_mode = tp->tx_mode;
1371
1372         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1373
1374                 /* Convert 1000BaseX flow control bits to 1000BaseT
1375                  * bits before resolving flow control.
1376                  */
1377                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1378                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1379                                        ADVERTISE_PAUSE_ASYM);
1380                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1381
1382                         if (local_adv & ADVERTISE_1000XPAUSE)
1383                                 local_adv |= ADVERTISE_PAUSE_CAP;
1384                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1385                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1386                         if (remote_adv & LPA_1000XPAUSE)
1387                                 remote_adv |= LPA_PAUSE_CAP;
1388                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1389                                 remote_adv |= LPA_PAUSE_ASYM;
1390                 }
1391
1392                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1393                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1394                                 if (remote_adv & LPA_PAUSE_CAP)
1395                                         new_tg3_flags |=
1396                                                 (TG3_FLAG_RX_PAUSE |
1397                                                 TG3_FLAG_TX_PAUSE);
1398                                 else if (remote_adv & LPA_PAUSE_ASYM)
1399                                         new_tg3_flags |=
1400                                                 (TG3_FLAG_RX_PAUSE);
1401                         } else {
1402                                 if (remote_adv & LPA_PAUSE_CAP)
1403                                         new_tg3_flags |=
1404                                                 (TG3_FLAG_RX_PAUSE |
1405                                                 TG3_FLAG_TX_PAUSE);
1406                         }
1407                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1408                         if ((remote_adv & LPA_PAUSE_CAP) &&
1409                         (remote_adv & LPA_PAUSE_ASYM))
1410                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1411                 }
1412
1413                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1414                 tp->tg3_flags |= new_tg3_flags;
1415         } else {
1416                 new_tg3_flags = tp->tg3_flags;
1417         }
1418
1419         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1420                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1421         else
1422                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1423
1424         if (old_rx_mode != tp->rx_mode) {
1425                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1426         }
1427
1428         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1429                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1430         else
1431                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1432
1433         if (old_tx_mode != tp->tx_mode) {
1434                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1435         }
1436 }
1437
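/* Decode the speed/duplex field of the Broadcom auxiliary status
 * register into the generic SPEED_xxx / DUPLEX_xxx values used in
 * tp->link_config.
 */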
1438 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1439 {
1440         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1441         case MII_TG3_AUX_STAT_10HALF:
1442                 *speed = SPEED_10;
1443                 *duplex = DUPLEX_HALF;
1444                 break;
1445
1446         case MII_TG3_AUX_STAT_10FULL:
1447                 *speed = SPEED_10;
1448                 *duplex = DUPLEX_FULL;
1449                 break;
1450
1451         case MII_TG3_AUX_STAT_100HALF:
1452                 *speed = SPEED_100;
1453                 *duplex = DUPLEX_HALF;
1454                 break;
1455
1456         case MII_TG3_AUX_STAT_100FULL:
1457                 *speed = SPEED_100;
1458                 *duplex = DUPLEX_FULL;
1459                 break;
1460
1461         case MII_TG3_AUX_STAT_1000HALF:
1462                 *speed = SPEED_1000;
1463                 *duplex = DUPLEX_HALF;
1464                 break;
1465
1466         case MII_TG3_AUX_STAT_1000FULL:
1467                 *speed = SPEED_1000;
1468                 *duplex = DUPLEX_FULL;
1469                 break;
1470
1471         default:
1472                 *speed = SPEED_INVALID;
1473                 *duplex = DUPLEX_INVALID;
1474                 break;
1475         }
1476 }
1477
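/* Program the copper PHY according to tp->link_config: advertisement
 * registers are written for the requested (or, in low power mode, a
 * reduced) set of modes, then either autoneg is restarted or the
 * requested speed/duplex is forced through BMCR after briefly dropping
 * the link via loopback.
 */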
1478 static void tg3_phy_copper_begin(struct tg3 *tp)
1479 {
1480         u32 new_adv;
1481         int i;
1482
1483         if (tp->link_config.phy_is_low_power) {
1484                 /* Entering low power mode.  Disable gigabit and
1485                  * 100baseT advertisements.
1486                  */
1487                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1488
1489                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1490                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1491                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1492                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1493
1494                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1495         } else if (tp->link_config.speed == SPEED_INVALID) {
1496                 tp->link_config.advertising =
1497                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1498                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1499                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1500                          ADVERTISED_Autoneg | ADVERTISED_MII);
1501
1502                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1503                         tp->link_config.advertising &=
1504                                 ~(ADVERTISED_1000baseT_Half |
1505                                   ADVERTISED_1000baseT_Full);
1506
1507                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1508                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1509                         new_adv |= ADVERTISE_10HALF;
1510                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1511                         new_adv |= ADVERTISE_10FULL;
1512                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1513                         new_adv |= ADVERTISE_100HALF;
1514                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1515                         new_adv |= ADVERTISE_100FULL;
1516                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1517
1518                 if (tp->link_config.advertising &
1519                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1520                         new_adv = 0;
1521                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1522                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1523                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1524                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1525                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1526                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1527                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1528                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1529                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1530                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1531                 } else {
1532                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1533                 }
1534         } else {
1535                 /* Asking for a specific link mode. */
1536                 if (tp->link_config.speed == SPEED_1000) {
1537                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1538                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1539
1540                         if (tp->link_config.duplex == DUPLEX_FULL)
1541                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1542                         else
1543                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1544                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1545                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1546                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1547                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1548                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1549                 } else {
1550                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1551
1552                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1553                         if (tp->link_config.speed == SPEED_100) {
1554                                 if (tp->link_config.duplex == DUPLEX_FULL)
1555                                         new_adv |= ADVERTISE_100FULL;
1556                                 else
1557                                         new_adv |= ADVERTISE_100HALF;
1558                         } else {
1559                                 if (tp->link_config.duplex == DUPLEX_FULL)
1560                                         new_adv |= ADVERTISE_10FULL;
1561                                 else
1562                                         new_adv |= ADVERTISE_10HALF;
1563                         }
1564                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1565                 }
1566         }
1567
1568         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1569             tp->link_config.speed != SPEED_INVALID) {
1570                 u32 bmcr, orig_bmcr;
1571
1572                 tp->link_config.active_speed = tp->link_config.speed;
1573                 tp->link_config.active_duplex = tp->link_config.duplex;
1574
1575                 bmcr = 0;
1576                 switch (tp->link_config.speed) {
1577                 default:
1578                 case SPEED_10:
1579                         break;
1580
1581                 case SPEED_100:
1582                         bmcr |= BMCR_SPEED100;
1583                         break;
1584
1585                 case SPEED_1000:
1586                         bmcr |= TG3_BMCR_SPEED1000;
1587                         break;
1588                 }
1589
1590                 if (tp->link_config.duplex == DUPLEX_FULL)
1591                         bmcr |= BMCR_FULLDPLX;
1592
1593                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1594                     (bmcr != orig_bmcr)) {
1595                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1596                         for (i = 0; i < 1500; i++) {
1597                                 u32 tmp;
1598
1599                                 udelay(10);
1600                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1601                                     tg3_readphy(tp, MII_BMSR, &tmp))
1602                                         continue;
1603                                 if (!(tmp & BMSR_LSTATUS)) {
1604                                         udelay(40);
1605                                         break;
1606                                 }
1607                         }
1608                         tg3_writephy(tp, MII_BMCR, bmcr);
1609                         udelay(40);
1610                 }
1611         } else {
1612                 tg3_writephy(tp, MII_BMCR,
1613                              BMCR_ANENABLE | BMCR_ANRESTART);
1614         }
1615 }
1616
1617 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1618 {
1619         int err;
1620
1621         /* Turn off tap power management. */
1622         /* Set Extended packet length bit */
1623         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1624
1625         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1626         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1627
1628         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1629         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1630
1631         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1632         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1633
1634         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1635         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1636
1637         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1638         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1639
1640         udelay(40);
1641
1642         return err;
1643 }
1644
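/* Return 1 if the PHY is currently advertising every 10/100 mode (and
 * both gigabit modes unless the board is 10/100-only), 0 otherwise.
 * Used to decide whether autoneg must be restarted, e.g. when leaving
 * low power mode with a reduced advertisement.
 */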
1645 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1646 {
1647         u32 adv_reg, all_mask;
1648
1649         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1650                 return 0;
1651
1652         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1653                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1654         if ((adv_reg & all_mask) != all_mask)
1655                 return 0;
1656         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1657                 u32 tg3_ctrl;
1658
1659                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1660                         return 0;
1661
1662                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1663                             MII_TG3_CTRL_ADV_1000_FULL);
1664                 if ((tg3_ctrl & all_mask) != all_mask)
1665                         return 0;
1666         }
1667         return 1;
1668 }
1669
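/* (Re)establish the link on a copper PHY: apply the per-chip PHY
 * workarounds, wait for link, read the negotiated speed/duplex from
 * the aux status register, resolve flow control and program MAC_MODE
 * to match, then update the carrier state and print a link report.
 */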
1670 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1671 {
1672         int current_link_up;
1673         u32 bmsr, dummy;
1674         u16 current_speed;
1675         u8 current_duplex;
1676         int i, err;
1677
1678         tw32(MAC_EVENT, 0);
1679
1680         tw32_f(MAC_STATUS,
1681              (MAC_STATUS_SYNC_CHANGED |
1682               MAC_STATUS_CFG_CHANGED |
1683               MAC_STATUS_MI_COMPLETION |
1684               MAC_STATUS_LNKSTATE_CHANGED));
1685         udelay(40);
1686
1687         tp->mi_mode = MAC_MI_MODE_BASE;
1688         tw32_f(MAC_MI_MODE, tp->mi_mode);
1689         udelay(80);
1690
1691         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1692
1693         /* Some third-party PHYs need to be reset on link going
1694          * down.
1695          */
1696         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1697              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1698              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1699             netif_carrier_ok(tp->dev)) {
1700                 tg3_readphy(tp, MII_BMSR, &bmsr);
1701                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1702                     !(bmsr & BMSR_LSTATUS))
1703                         force_reset = 1;
1704         }
1705         if (force_reset)
1706                 tg3_phy_reset(tp);
1707
1708         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1709                 tg3_readphy(tp, MII_BMSR, &bmsr);
1710                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1711                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1712                         bmsr = 0;
1713
1714                 if (!(bmsr & BMSR_LSTATUS)) {
1715                         err = tg3_init_5401phy_dsp(tp);
1716                         if (err)
1717                                 return err;
1718
1719                         tg3_readphy(tp, MII_BMSR, &bmsr);
1720                         for (i = 0; i < 1000; i++) {
1721                                 udelay(10);
1722                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1723                                     (bmsr & BMSR_LSTATUS)) {
1724                                         udelay(40);
1725                                         break;
1726                                 }
1727                         }
1728
1729                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1730                             !(bmsr & BMSR_LSTATUS) &&
1731                             tp->link_config.active_speed == SPEED_1000) {
1732                                 err = tg3_phy_reset(tp);
1733                                 if (!err)
1734                                         err = tg3_init_5401phy_dsp(tp);
1735                                 if (err)
1736                                         return err;
1737                         }
1738                 }
1739         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1740                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1741                 /* 5701 {A0,B0} CRC bug workaround */
1742                 tg3_writephy(tp, 0x15, 0x0a75);
1743                 tg3_writephy(tp, 0x1c, 0x8c68);
1744                 tg3_writephy(tp, 0x1c, 0x8d68);
1745                 tg3_writephy(tp, 0x1c, 0x8c68);
1746         }
1747
1748         /* Clear pending interrupts... */
1749         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1750         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1751
1752         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1753                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1754         else
1755                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1756
1757         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1758             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1759                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1760                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1761                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1762                 else
1763                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1764         }
1765
1766         current_link_up = 0;
1767         current_speed = SPEED_INVALID;
1768         current_duplex = DUPLEX_INVALID;
1769
1770         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1771                 u32 val;
1772
1773                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1774                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1775                 if (!(val & (1 << 10))) {
1776                         val |= (1 << 10);
1777                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1778                         goto relink;
1779                 }
1780         }
1781
1782         bmsr = 0;
1783         for (i = 0; i < 100; i++) {
1784                 tg3_readphy(tp, MII_BMSR, &bmsr);
1785                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1786                     (bmsr & BMSR_LSTATUS))
1787                         break;
1788                 udelay(40);
1789         }
1790
1791         if (bmsr & BMSR_LSTATUS) {
1792                 u32 aux_stat, bmcr;
1793
1794                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1795                 for (i = 0; i < 2000; i++) {
1796                         udelay(10);
1797                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1798                             aux_stat)
1799                                 break;
1800                 }
1801
1802                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1803                                              &current_speed,
1804                                              &current_duplex);
1805
1806                 bmcr = 0;
1807                 for (i = 0; i < 200; i++) {
1808                         tg3_readphy(tp, MII_BMCR, &bmcr);
1809                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1810                                 continue;
1811                         if (bmcr && bmcr != 0x7fff)
1812                                 break;
1813                         udelay(10);
1814                 }
1815
1816                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1817                         if (bmcr & BMCR_ANENABLE) {
1818                                 current_link_up = 1;
1819
1820                                 /* Force autoneg restart if we are exiting
1821                                  * low power mode.
1822                                  */
1823                                 if (!tg3_copper_is_advertising_all(tp))
1824                                         current_link_up = 0;
1825                         } else {
1826                                 current_link_up = 0;
1827                         }
1828                 } else {
1829                         if (!(bmcr & BMCR_ANENABLE) &&
1830                             tp->link_config.speed == current_speed &&
1831                             tp->link_config.duplex == current_duplex) {
1832                                 current_link_up = 1;
1833                         } else {
1834                                 current_link_up = 0;
1835                         }
1836                 }
1837
1838                 tp->link_config.active_speed = current_speed;
1839                 tp->link_config.active_duplex = current_duplex;
1840         }
1841
1842         if (current_link_up == 1 &&
1843             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1844             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1845                 u32 local_adv, remote_adv;
1846
1847                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1848                         local_adv = 0;
1849                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1850
1851                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1852                         remote_adv = 0;
1853
1854                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1855
1856                 /* If we are not advertising full pause capability,
1857                  * something is wrong.  Bring the link down and reconfigure.
1858                  */
1859                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1860                         current_link_up = 0;
1861                 } else {
1862                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1863                 }
1864         }
1865 relink:
1866         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1867                 u32 tmp;
1868
1869                 tg3_phy_copper_begin(tp);
1870
1871                 tg3_readphy(tp, MII_BMSR, &tmp);
1872                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1873                     (tmp & BMSR_LSTATUS))
1874                         current_link_up = 1;
1875         }
1876
1877         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1878         if (current_link_up == 1) {
1879                 if (tp->link_config.active_speed == SPEED_100 ||
1880                     tp->link_config.active_speed == SPEED_10)
1881                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1882                 else
1883                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1884         } else
1885                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1886
1887         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1888         if (tp->link_config.active_duplex == DUPLEX_HALF)
1889                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1890
1891         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1893                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1894                     (current_link_up == 1 &&
1895                      tp->link_config.active_speed == SPEED_10))
1896                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1897         } else {
1898                 if (current_link_up == 1)
1899                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1900         }
1901
1902         /* ??? Without this setting Netgear GA302T PHY does not
1903          * ??? send/receive packets...
1904          */
1905         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1906             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1907                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1908                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1909                 udelay(80);
1910         }
1911
1912         tw32_f(MAC_MODE, tp->mac_mode);
1913         udelay(40);
1914
1915         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1916                 /* Polled via timer. */
1917                 tw32_f(MAC_EVENT, 0);
1918         } else {
1919                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1920         }
1921         udelay(40);
1922
1923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1924             current_link_up == 1 &&
1925             tp->link_config.active_speed == SPEED_1000 &&
1926             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1927              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1928                 udelay(120);
1929                 tw32_f(MAC_STATUS,
1930                      (MAC_STATUS_SYNC_CHANGED |
1931                       MAC_STATUS_CFG_CHANGED));
1932                 udelay(40);
1933                 tg3_write_mem(tp,
1934                               NIC_SRAM_FIRMWARE_MBOX,
1935                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1936         }
1937
1938         if (current_link_up != netif_carrier_ok(tp->dev)) {
1939                 if (current_link_up)
1940                         netif_carrier_on(tp->dev);
1941                 else
1942                         netif_carrier_off(tp->dev);
1943                 tg3_link_report(tp);
1944         }
1945
1946         return 0;
1947 }
1948
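/* Software 1000BASE-X autonegotiation (IEEE 802.3 clause 37) state
 * machine, used by tg3_setup_fiber_by_hand() when the device has no
 * hardware autoneg support.  The MR_* flags correspond to the
 * management variables in the standard's state diagrams; the
 * ANEG_CFG_* values are bit masks within the config word as seen in
 * the MAC_RX_AUTO_NEG register.
 */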
1949 struct tg3_fiber_aneginfo {
1950         int state;
1951 #define ANEG_STATE_UNKNOWN              0
1952 #define ANEG_STATE_AN_ENABLE            1
1953 #define ANEG_STATE_RESTART_INIT         2
1954 #define ANEG_STATE_RESTART              3
1955 #define ANEG_STATE_DISABLE_LINK_OK      4
1956 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1957 #define ANEG_STATE_ABILITY_DETECT       6
1958 #define ANEG_STATE_ACK_DETECT_INIT      7
1959 #define ANEG_STATE_ACK_DETECT           8
1960 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1961 #define ANEG_STATE_COMPLETE_ACK         10
1962 #define ANEG_STATE_IDLE_DETECT_INIT     11
1963 #define ANEG_STATE_IDLE_DETECT          12
1964 #define ANEG_STATE_LINK_OK              13
1965 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1966 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1967
1968         u32 flags;
1969 #define MR_AN_ENABLE            0x00000001
1970 #define MR_RESTART_AN           0x00000002
1971 #define MR_AN_COMPLETE          0x00000004
1972 #define MR_PAGE_RX              0x00000008
1973 #define MR_NP_LOADED            0x00000010
1974 #define MR_TOGGLE_TX            0x00000020
1975 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1976 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1977 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1978 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1979 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1980 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1981 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1982 #define MR_TOGGLE_RX            0x00002000
1983 #define MR_NP_RX                0x00004000
1984
1985 #define MR_LINK_OK              0x80000000
1986
1987         unsigned long link_time, cur_time;
1988
1989         u32 ability_match_cfg;
1990         int ability_match_count;
1991
1992         char ability_match, idle_match, ack_match;
1993
1994         u32 txconfig, rxconfig;
1995 #define ANEG_CFG_NP             0x00000080
1996 #define ANEG_CFG_ACK            0x00000040
1997 #define ANEG_CFG_RF2            0x00000020
1998 #define ANEG_CFG_RF1            0x00000010
1999 #define ANEG_CFG_PS2            0x00000001
2000 #define ANEG_CFG_PS1            0x00008000
2001 #define ANEG_CFG_HD             0x00004000
2002 #define ANEG_CFG_FD             0x00002000
2003 #define ANEG_CFG_INVAL          0x00001f06
2004
2005 };
2006 #define ANEG_OK         0
2007 #define ANEG_DONE       1
2008 #define ANEG_TIMER_ENAB 2
2009 #define ANEG_FAILED     -1
2010
2011 #define ANEG_STATE_SETTLE_TIME  10000
2012
2013 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2014                                    struct tg3_fiber_aneginfo *ap)
2015 {
2016         unsigned long delta;
2017         u32 rx_cfg_reg;
2018         int ret;
2019
2020         if (ap->state == ANEG_STATE_UNKNOWN) {
2021                 ap->rxconfig = 0;
2022                 ap->link_time = 0;
2023                 ap->cur_time = 0;
2024                 ap->ability_match_cfg = 0;
2025                 ap->ability_match_count = 0;
2026                 ap->ability_match = 0;
2027                 ap->idle_match = 0;
2028                 ap->ack_match = 0;
2029         }
2030         ap->cur_time++;
2031
2032         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2033                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2034
2035                 if (rx_cfg_reg != ap->ability_match_cfg) {
2036                         ap->ability_match_cfg = rx_cfg_reg;
2037                         ap->ability_match = 0;
2038                         ap->ability_match_count = 0;
2039                 } else {
2040                         if (++ap->ability_match_count > 1) {
2041                                 ap->ability_match = 1;
2042                                 ap->ability_match_cfg = rx_cfg_reg;
2043                         }
2044                 }
2045                 if (rx_cfg_reg & ANEG_CFG_ACK)
2046                         ap->ack_match = 1;
2047                 else
2048                         ap->ack_match = 0;
2049
2050                 ap->idle_match = 0;
2051         } else {
2052                 ap->idle_match = 1;
2053                 ap->ability_match_cfg = 0;
2054                 ap->ability_match_count = 0;
2055                 ap->ability_match = 0;
2056                 ap->ack_match = 0;
2057
2058                 rx_cfg_reg = 0;
2059         }
2060
2061         ap->rxconfig = rx_cfg_reg;
2062         ret = ANEG_OK;
2063
2064         switch(ap->state) {
2065         case ANEG_STATE_UNKNOWN:
2066                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2067                         ap->state = ANEG_STATE_AN_ENABLE;
2068
2069                 /* fallthru */
2070         case ANEG_STATE_AN_ENABLE:
2071                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2072                 if (ap->flags & MR_AN_ENABLE) {
2073                         ap->link_time = 0;
2074                         ap->cur_time = 0;
2075                         ap->ability_match_cfg = 0;
2076                         ap->ability_match_count = 0;
2077                         ap->ability_match = 0;
2078                         ap->idle_match = 0;
2079                         ap->ack_match = 0;
2080
2081                         ap->state = ANEG_STATE_RESTART_INIT;
2082                 } else {
2083                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2084                 }
2085                 break;
2086
2087         case ANEG_STATE_RESTART_INIT:
2088                 ap->link_time = ap->cur_time;
2089                 ap->flags &= ~(MR_NP_LOADED);
2090                 ap->txconfig = 0;
2091                 tw32(MAC_TX_AUTO_NEG, 0);
2092                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2093                 tw32_f(MAC_MODE, tp->mac_mode);
2094                 udelay(40);
2095
2096                 ret = ANEG_TIMER_ENAB;
2097                 ap->state = ANEG_STATE_RESTART;
2098
2099                 /* fallthru */
2100         case ANEG_STATE_RESTART:
2101                 delta = ap->cur_time - ap->link_time;
2102                 if (delta > ANEG_STATE_SETTLE_TIME) {
2103                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2104                 } else {
2105                         ret = ANEG_TIMER_ENAB;
2106                 }
2107                 break;
2108
2109         case ANEG_STATE_DISABLE_LINK_OK:
2110                 ret = ANEG_DONE;
2111                 break;
2112
2113         case ANEG_STATE_ABILITY_DETECT_INIT:
2114                 ap->flags &= ~(MR_TOGGLE_TX);
2115                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2116                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2117                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2118                 tw32_f(MAC_MODE, tp->mac_mode);
2119                 udelay(40);
2120
2121                 ap->state = ANEG_STATE_ABILITY_DETECT;
2122                 break;
2123
2124         case ANEG_STATE_ABILITY_DETECT:
2125                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2126                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2127                 }
2128                 break;
2129
2130         case ANEG_STATE_ACK_DETECT_INIT:
2131                 ap->txconfig |= ANEG_CFG_ACK;
2132                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2133                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2134                 tw32_f(MAC_MODE, tp->mac_mode);
2135                 udelay(40);
2136
2137                 ap->state = ANEG_STATE_ACK_DETECT;
2138
2139                 /* fallthru */
2140         case ANEG_STATE_ACK_DETECT:
2141                 if (ap->ack_match != 0) {
2142                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2143                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2144                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2145                         } else {
2146                                 ap->state = ANEG_STATE_AN_ENABLE;
2147                         }
2148                 } else if (ap->ability_match != 0 &&
2149                            ap->rxconfig == 0) {
2150                         ap->state = ANEG_STATE_AN_ENABLE;
2151                 }
2152                 break;
2153
2154         case ANEG_STATE_COMPLETE_ACK_INIT:
2155                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2156                         ret = ANEG_FAILED;
2157                         break;
2158                 }
2159                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2160                                MR_LP_ADV_HALF_DUPLEX |
2161                                MR_LP_ADV_SYM_PAUSE |
2162                                MR_LP_ADV_ASYM_PAUSE |
2163                                MR_LP_ADV_REMOTE_FAULT1 |
2164                                MR_LP_ADV_REMOTE_FAULT2 |
2165                                MR_LP_ADV_NEXT_PAGE |
2166                                MR_TOGGLE_RX |
2167                                MR_NP_RX);
2168                 if (ap->rxconfig & ANEG_CFG_FD)
2169                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2170                 if (ap->rxconfig & ANEG_CFG_HD)
2171                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2172                 if (ap->rxconfig & ANEG_CFG_PS1)
2173                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2174                 if (ap->rxconfig & ANEG_CFG_PS2)
2175                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2176                 if (ap->rxconfig & ANEG_CFG_RF1)
2177                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2178                 if (ap->rxconfig & ANEG_CFG_RF2)
2179                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2180                 if (ap->rxconfig & ANEG_CFG_NP)
2181                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2182
2183                 ap->link_time = ap->cur_time;
2184
2185                 ap->flags ^= (MR_TOGGLE_TX);
2186                 if (ap->rxconfig & 0x0008)
2187                         ap->flags |= MR_TOGGLE_RX;
2188                 if (ap->rxconfig & ANEG_CFG_NP)
2189                         ap->flags |= MR_NP_RX;
2190                 ap->flags |= MR_PAGE_RX;
2191
2192                 ap->state = ANEG_STATE_COMPLETE_ACK;
2193                 ret = ANEG_TIMER_ENAB;
2194                 break;
2195
2196         case ANEG_STATE_COMPLETE_ACK:
2197                 if (ap->ability_match != 0 &&
2198                     ap->rxconfig == 0) {
2199                         ap->state = ANEG_STATE_AN_ENABLE;
2200                         break;
2201                 }
2202                 delta = ap->cur_time - ap->link_time;
2203                 if (delta > ANEG_STATE_SETTLE_TIME) {
2204                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2205                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2206                         } else {
2207                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2208                                     !(ap->flags & MR_NP_RX)) {
2209                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2210                                 } else {
2211                                         ret = ANEG_FAILED;
2212                                 }
2213                         }
2214                 }
2215                 break;
2216
2217         case ANEG_STATE_IDLE_DETECT_INIT:
2218                 ap->link_time = ap->cur_time;
2219                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2220                 tw32_f(MAC_MODE, tp->mac_mode);
2221                 udelay(40);
2222
2223                 ap->state = ANEG_STATE_IDLE_DETECT;
2224                 ret = ANEG_TIMER_ENAB;
2225                 break;
2226
2227         case ANEG_STATE_IDLE_DETECT:
2228                 if (ap->ability_match != 0 &&
2229                     ap->rxconfig == 0) {
2230                         ap->state = ANEG_STATE_AN_ENABLE;
2231                         break;
2232                 }
2233                 delta = ap->cur_time - ap->link_time;
2234                 if (delta > ANEG_STATE_SETTLE_TIME) {
2235                         /* XXX another gem from the Broadcom driver :( */
2236                         ap->state = ANEG_STATE_LINK_OK;
2237                 }
2238                 break;
2239
2240         case ANEG_STATE_LINK_OK:
2241                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2242                 ret = ANEG_DONE;
2243                 break;
2244
2245         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2246                 /* ??? unimplemented */
2247                 break;
2248
2249         case ANEG_STATE_NEXT_PAGE_WAIT:
2250                 /* ??? unimplemented */
2251                 break;
2252
2253         default:
2254                 ret = ANEG_FAILED;
2255                 break;
2256         }
2257
2258         return ret;
2259 }
2260
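/* Drive the software fiber autoneg state machine: put the MAC into
 * GMII mode sending config words, step the state machine roughly once
 * per microsecond for up to ~195 ms, then stop sending configs.
 * Returns nonzero on success; the caller derives flow control from the
 * returned MR_LP_ADV_* flags.
 */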
2261 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2262 {
2263         int res = 0;
2264         struct tg3_fiber_aneginfo aninfo;
2265         int status = ANEG_FAILED;
2266         unsigned int tick;
2267         u32 tmp;
2268
2269         tw32_f(MAC_TX_AUTO_NEG, 0);
2270
2271         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2272         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2273         udelay(40);
2274
2275         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2276         udelay(40);
2277
2278         memset(&aninfo, 0, sizeof(aninfo));
2279         aninfo.flags |= MR_AN_ENABLE;
2280         aninfo.state = ANEG_STATE_UNKNOWN;
2281         aninfo.cur_time = 0;
2282         tick = 0;
2283         while (++tick < 195000) {
2284                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2285                 if (status == ANEG_DONE || status == ANEG_FAILED)
2286                         break;
2287
2288                 udelay(1);
2289         }
2290
2291         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2292         tw32_f(MAC_MODE, tp->mac_mode);
2293         udelay(40);
2294
2295         *flags = aninfo.flags;
2296
2297         if (status == ANEG_DONE &&
2298             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2299                              MR_LP_ADV_FULL_DUPLEX)))
2300                 res = 1;
2301
2302         return res;
2303 }
2304
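/* Bring-up sequence for the external BCM8002 SerDes PHY, done entirely
 * with raw PHY register writes: set the PLL lock range, software
 * reset, configure auto-lock/comdet, toggle POR and wait for the
 * signal to stabilize.
 */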
2305 static void tg3_init_bcm8002(struct tg3 *tp)
2306 {
2307         u32 mac_status = tr32(MAC_STATUS);
2308         int i;
2309
2310         /* Reset when initializing for the first time, or when we have a link. */
2311         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2312             !(mac_status & MAC_STATUS_PCS_SYNCED))
2313                 return;
2314
2315         /* Set PLL lock range. */
2316         tg3_writephy(tp, 0x16, 0x8007);
2317
2318         /* SW reset */
2319         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2320
2321         /* Wait for reset to complete. */
2322         /* XXX schedule_timeout() ... */
2323         for (i = 0; i < 500; i++)
2324                 udelay(10);
2325
2326         /* Config mode; select PMA/Ch 1 regs. */
2327         tg3_writephy(tp, 0x10, 0x8411);
2328
2329         /* Enable auto-lock and comdet, select txclk for tx. */
2330         tg3_writephy(tp, 0x11, 0x0a10);
2331
2332         tg3_writephy(tp, 0x18, 0x00a0);
2333         tg3_writephy(tp, 0x16, 0x41ff);
2334
2335         /* Assert and deassert POR. */
2336         tg3_writephy(tp, 0x13, 0x0400);
2337         udelay(40);
2338         tg3_writephy(tp, 0x13, 0x0000);
2339
2340         tg3_writephy(tp, 0x11, 0x0a50);
2341         udelay(40);
2342         tg3_writephy(tp, 0x11, 0x0a10);
2343
2344         /* Wait for signal to stabilize */
2345         /* XXX schedule_timeout() ... */
2346         for (i = 0; i < 15000; i++)
2347                 udelay(10);
2348
2349         /* Deselect the channel register so we can read the PHYID
2350          * later.
2351          */
2352         tg3_writephy(tp, 0x10, 0x8011);
2353 }
2354
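/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 * Handles the forced-mode case, (re)starts hardware autoneg when
 * SG_DIG_CTRL does not hold the expected value, and falls back to
 * parallel detection when the link partner sends no config words.
 */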
2355 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2356 {
2357         u32 sg_dig_ctrl, sg_dig_status;
2358         u32 serdes_cfg, expected_sg_dig_ctrl;
2359         int workaround, port_a;
2360         int current_link_up;
2361
2362         serdes_cfg = 0;
2363         expected_sg_dig_ctrl = 0;
2364         workaround = 0;
2365         port_a = 1;
2366         current_link_up = 0;
2367
2368         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2369             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2370                 workaround = 1;
2371                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2372                         port_a = 0;
2373
2374                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2375                 /* preserve bits 20-23 for voltage regulator */
2376                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2377         }
2378
2379         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2380
2381         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2382                 if (sg_dig_ctrl & (1 << 31)) {
2383                         if (workaround) {
2384                                 u32 val = serdes_cfg;
2385
2386                                 if (port_a)
2387                                         val |= 0xc010000;
2388                                 else
2389                                         val |= 0x4010000;
2390                                 tw32_f(MAC_SERDES_CFG, val);
2391                         }
2392                         tw32_f(SG_DIG_CTRL, 0x01388400);
2393                 }
2394                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2395                         tg3_setup_flow_control(tp, 0, 0);
2396                         current_link_up = 1;
2397                 }
2398                 goto out;
2399         }
2400
2401         /* Want auto-negotiation.  */
2402         expected_sg_dig_ctrl = 0x81388400;
2403
2404         /* Pause capability */
2405         expected_sg_dig_ctrl |= (1 << 11);
2406
2407         /* Asymmetric pause */
2408         expected_sg_dig_ctrl |= (1 << 12);
2409
2410         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2411                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2412                     tp->serdes_counter &&
2413                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2414                                     MAC_STATUS_RCVD_CFG)) ==
2415                      MAC_STATUS_PCS_SYNCED)) {
2416                         tp->serdes_counter--;
2417                         current_link_up = 1;
2418                         goto out;
2419                 }
2420 restart_autoneg:
2421                 if (workaround)
2422                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2423                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2424                 udelay(5);
2425                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2426
2427                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2428                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2429         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2430                                  MAC_STATUS_SIGNAL_DET)) {
2431                 sg_dig_status = tr32(SG_DIG_STATUS);
2432                 mac_status = tr32(MAC_STATUS);
2433
2434                 if ((sg_dig_status & (1 << 1)) &&
2435                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2436                         u32 local_adv, remote_adv;
2437
2438                         local_adv = ADVERTISE_PAUSE_CAP;
2439                         remote_adv = 0;
2440                         if (sg_dig_status & (1 << 19))
2441                                 remote_adv |= LPA_PAUSE_CAP;
2442                         if (sg_dig_status & (1 << 20))
2443                                 remote_adv |= LPA_PAUSE_ASYM;
2444
2445                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2446                         current_link_up = 1;
2447                         tp->serdes_counter = 0;
2448                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2449                 } else if (!(sg_dig_status & (1 << 1))) {
2450                         if (tp->serdes_counter)
2451                                 tp->serdes_counter--;
2452                         else {
2453                                 if (workaround) {
2454                                         u32 val = serdes_cfg;
2455
2456                                         if (port_a)
2457                                                 val |= 0xc010000;
2458                                         else
2459                                                 val |= 0x4010000;
2460
2461                                         tw32_f(MAC_SERDES_CFG, val);
2462                                 }
2463
2464                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2465                                 udelay(40);
2466
2467                                 /* Link parallel detection: the link is up
2468                                  * only if we have PCS_SYNC and are not
2469                                  * receiving config code words. */
2470                                 mac_status = tr32(MAC_STATUS);
2471                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2472                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2473                                         tg3_setup_flow_control(tp, 0, 0);
2474                                         current_link_up = 1;
2475                                         tp->tg3_flags2 |=
2476                                                 TG3_FLG2_PARALLEL_DETECT;
2477                                         tp->serdes_counter =
2478                                                 SERDES_PARALLEL_DET_TIMEOUT;
2479                                 } else
2480                                         goto restart_autoneg;
2481                         }
2482                 }
2483         } else {
2484                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2485                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2486         }
2487
2488 out:
2489         return current_link_up;
2490 }
2491
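/* Fiber link setup without hardware autoneg: run the software clause
 * 37 state machine when autoneg is enabled, otherwise simply force a
 * 1000 Mb/s full-duplex link.
 */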
2492 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2493 {
2494         int current_link_up = 0;
2495
2496         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2497                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2498                 goto out;
2499         }
2500
2501         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2502                 u32 flags;
2503                 int i;
2504
2505                 if (fiber_autoneg(tp, &flags)) {
2506                         u32 local_adv, remote_adv;
2507
2508                         local_adv = ADVERTISE_PAUSE_CAP;
2509                         remote_adv = 0;
2510                         if (flags & MR_LP_ADV_SYM_PAUSE)
2511                                 remote_adv |= LPA_PAUSE_CAP;
2512                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2513                                 remote_adv |= LPA_PAUSE_ASYM;
2514
2515                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2516
2517                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2518                         current_link_up = 1;
2519                 }
2520                 for (i = 0; i < 30; i++) {
2521                         udelay(20);
2522                         tw32_f(MAC_STATUS,
2523                                (MAC_STATUS_SYNC_CHANGED |
2524                                 MAC_STATUS_CFG_CHANGED));
2525                         udelay(40);
2526                         if ((tr32(MAC_STATUS) &
2527                              (MAC_STATUS_SYNC_CHANGED |
2528                               MAC_STATUS_CFG_CHANGED)) == 0)
2529                                 break;
2530                 }
2531
2532                 mac_status = tr32(MAC_STATUS);
2533                 if (current_link_up == 0 &&
2534                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2535                     !(mac_status & MAC_STATUS_RCVD_CFG))
2536                         current_link_up = 1;
2537         } else {
2538                 /* Forcing 1000FD link up. */
2539                 current_link_up = 1;
2540                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2541
2542                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2543                 udelay(40);
2544         }
2545
2546 out:
2547         return current_link_up;
2548 }
2549
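/* Top-level link setup for TBI/fiber devices: select hardware or
 * by-hand autoneg, program MAC_MODE for TBI, drive the link LED
 * override bits and report link changes (including pure flow-control
 * changes).
 */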
2550 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2551 {
2552         u32 orig_pause_cfg;
2553         u16 orig_active_speed;
2554         u8 orig_active_duplex;
2555         u32 mac_status;
2556         int current_link_up;
2557         int i;
2558
2559         orig_pause_cfg =
2560                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2561                                   TG3_FLAG_TX_PAUSE));
2562         orig_active_speed = tp->link_config.active_speed;
2563         orig_active_duplex = tp->link_config.active_duplex;
2564
2565         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2566             netif_carrier_ok(tp->dev) &&
2567             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2568                 mac_status = tr32(MAC_STATUS);
2569                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2570                                MAC_STATUS_SIGNAL_DET |
2571                                MAC_STATUS_CFG_CHANGED |
2572                                MAC_STATUS_RCVD_CFG);
2573                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2574                                    MAC_STATUS_SIGNAL_DET)) {
2575                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2576                                             MAC_STATUS_CFG_CHANGED));
2577                         return 0;
2578                 }
2579         }
2580
2581         tw32_f(MAC_TX_AUTO_NEG, 0);
2582
2583         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2584         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2585         tw32_f(MAC_MODE, tp->mac_mode);
2586         udelay(40);
2587
2588         if (tp->phy_id == PHY_ID_BCM8002)
2589                 tg3_init_bcm8002(tp);
2590
2591         /* Enable link change events even while polling the serdes. */
2592         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2593         udelay(40);
2594
2595         current_link_up = 0;
2596         mac_status = tr32(MAC_STATUS);
2597
2598         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2599                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2600         else
2601                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2602
2603         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2604         tw32_f(MAC_MODE, tp->mac_mode);
2605         udelay(40);
2606
2607         tp->hw_status->status =
2608                 (SD_STATUS_UPDATED |
2609                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2610
2611         for (i = 0; i < 100; i++) {
2612                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2613                                     MAC_STATUS_CFG_CHANGED));
2614                 udelay(5);
2615                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2616                                          MAC_STATUS_CFG_CHANGED |
2617                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2618                         break;
2619         }
2620
2621         mac_status = tr32(MAC_STATUS);
2622         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2623                 current_link_up = 0;
2624                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2625                     tp->serdes_counter == 0) {
2626                         tw32_f(MAC_MODE, (tp->mac_mode |
2627                                           MAC_MODE_SEND_CONFIGS));
2628                         udelay(1);
2629                         tw32_f(MAC_MODE, tp->mac_mode);
2630                 }
2631         }
2632
2633         if (current_link_up == 1) {
2634                 tp->link_config.active_speed = SPEED_1000;
2635                 tp->link_config.active_duplex = DUPLEX_FULL;
2636                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2637                                     LED_CTRL_LNKLED_OVERRIDE |
2638                                     LED_CTRL_1000MBPS_ON));
2639         } else {
2640                 tp->link_config.active_speed = SPEED_INVALID;
2641                 tp->link_config.active_duplex = DUPLEX_INVALID;
2642                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2643                                     LED_CTRL_LNKLED_OVERRIDE |
2644                                     LED_CTRL_TRAFFIC_OVERRIDE));
2645         }
2646
2647         if (current_link_up != netif_carrier_ok(tp->dev)) {
2648                 if (current_link_up)
2649                         netif_carrier_on(tp->dev);
2650                 else
2651                         netif_carrier_off(tp->dev);
2652                 tg3_link_report(tp);
2653         } else {
2654                 u32 now_pause_cfg =
2655                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2656                                          TG3_FLAG_TX_PAUSE);
2657                 if (orig_pause_cfg != now_pause_cfg ||
2658                     orig_active_speed != tp->link_config.active_speed ||
2659                     orig_active_duplex != tp->link_config.active_duplex)
2660                         tg3_link_report(tp);
2661         }
2662
2663         return 0;
2664 }
2665
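/* Link setup for serdes devices managed through MII registers (e.g.
 * 5714S): the 1000BASE-X advertisement lives in MII_ADVERTISE and the
 * link is autonegotiated or forced via BMCR, much like a copper PHY.
 */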
2666 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2667 {
2668         int current_link_up, err = 0;
2669         u32 bmsr, bmcr;
2670         u16 current_speed;
2671         u8 current_duplex;
2672
2673         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2674         tw32_f(MAC_MODE, tp->mac_mode);
2675         udelay(40);
2676
2677         tw32(MAC_EVENT, 0);
2678
2679         tw32_f(MAC_STATUS,
2680              (MAC_STATUS_SYNC_CHANGED |
2681               MAC_STATUS_CFG_CHANGED |
2682               MAC_STATUS_MI_COMPLETION |
2683               MAC_STATUS_LNKSTATE_CHANGED));
2684         udelay(40);
2685
2686         if (force_reset)
2687                 tg3_phy_reset(tp);
2688
2689         current_link_up = 0;
2690         current_speed = SPEED_INVALID;
2691         current_duplex = DUPLEX_INVALID;
2692
2693         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2694         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2696                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2697                         bmsr |= BMSR_LSTATUS;
2698                 else
2699                         bmsr &= ~BMSR_LSTATUS;
2700         }
2701
2702         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2703
2704         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2705             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2706                 /* do nothing, just check for link up at the end */
2707         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2708                 u32 adv, new_adv;
2709
2710                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2711                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2712                                   ADVERTISE_1000XPAUSE |
2713                                   ADVERTISE_1000XPSE_ASYM |
2714                                   ADVERTISE_SLCT);
2715
2716                 /* Always advertise symmetric PAUSE just like copper */
2717                 new_adv |= ADVERTISE_1000XPAUSE;
2718
2719                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2720                         new_adv |= ADVERTISE_1000XHALF;
2721                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2722                         new_adv |= ADVERTISE_1000XFULL;
2723
2724                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2725                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2726                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2727                         tg3_writephy(tp, MII_BMCR, bmcr);
2728
2729                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2730                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2731                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2732
2733                         return err;
2734                 }
2735         } else {
2736                 u32 new_bmcr;
2737
2738                 bmcr &= ~BMCR_SPEED1000;
2739                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2740
2741                 if (tp->link_config.duplex == DUPLEX_FULL)
2742                         new_bmcr |= BMCR_FULLDPLX;
2743
2744                 if (new_bmcr != bmcr) {
2745                         /* BMCR_SPEED1000 is a reserved bit that needs
2746                          * to be set on write.
2747                          */
2748                         new_bmcr |= BMCR_SPEED1000;
2749
2750                         /* Force a linkdown */
2751                         if (netif_carrier_ok(tp->dev)) {
2752                                 u32 adv;
2753
2754                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2755                                 adv &= ~(ADVERTISE_1000XFULL |
2756                                          ADVERTISE_1000XHALF |
2757                                          ADVERTISE_SLCT);
2758                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2759                                 tg3_writephy(tp, MII_BMCR, bmcr |
2760                                                            BMCR_ANRESTART |
2761                                                            BMCR_ANENABLE);
2762                                 udelay(10);
2763                                 netif_carrier_off(tp->dev);
2764                         }
2765                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2766                         bmcr = new_bmcr;
2767                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2768                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2769                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2770                             ASIC_REV_5714) {
2771                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2772                                         bmsr |= BMSR_LSTATUS;
2773                                 else
2774                                         bmsr &= ~BMSR_LSTATUS;
2775                         }
2776                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2777                 }
2778         }
2779
2780         if (bmsr & BMSR_LSTATUS) {
2781                 current_speed = SPEED_1000;
2782                 current_link_up = 1;
2783                 if (bmcr & BMCR_FULLDPLX)
2784                         current_duplex = DUPLEX_FULL;
2785                 else
2786                         current_duplex = DUPLEX_HALF;
2787
2788                 if (bmcr & BMCR_ANENABLE) {
2789                         u32 local_adv, remote_adv, common;
2790
2791                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2792                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2793                         common = local_adv & remote_adv;
2794                         if (common & (ADVERTISE_1000XHALF |
2795                                       ADVERTISE_1000XFULL)) {
2796                                 if (common & ADVERTISE_1000XFULL)
2797                                         current_duplex = DUPLEX_FULL;
2798                                 else
2799                                         current_duplex = DUPLEX_HALF;
2800
2801                                 tg3_setup_flow_control(tp, local_adv,
2802                                                        remote_adv);
2803                         }
2804                         else
2805                                 current_link_up = 0;
2806                 }
2807         }
2808
2809         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2810         if (tp->link_config.active_duplex == DUPLEX_HALF)
2811                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2812
2813         tw32_f(MAC_MODE, tp->mac_mode);
2814         udelay(40);
2815
2816         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2817
2818         tp->link_config.active_speed = current_speed;
2819         tp->link_config.active_duplex = current_duplex;
2820
2821         if (current_link_up != netif_carrier_ok(tp->dev)) {
2822                 if (current_link_up)
2823                         netif_carrier_on(tp->dev);
2824                 else {
2825                         netif_carrier_off(tp->dev);
2826                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2827                 }
2828                 tg3_link_report(tp);
2829         }
2830         return err;
2831 }
2832
2833 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2834 {
2835         if (tp->serdes_counter) {
2836                 /* Give autoneg time to complete. */
2837                 tp->serdes_counter--;
2838                 return;
2839         }
2840         if (!netif_carrier_ok(tp->dev) &&
2841             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2842                 u32 bmcr;
2843
2844                 tg3_readphy(tp, MII_BMCR, &bmcr);
2845                 if (bmcr & BMCR_ANENABLE) {
2846                         u32 phy1, phy2;
2847
2848                         /* Select shadow register 0x1f */
2849                         tg3_writephy(tp, 0x1c, 0x7c00);
2850                         tg3_readphy(tp, 0x1c, &phy1);
2851
2852                         /* Select expansion interrupt status register */
2853                         tg3_writephy(tp, 0x17, 0x0f01);
2854                         tg3_readphy(tp, 0x15, &phy2);
2855                         tg3_readphy(tp, 0x15, &phy2);
2856
2857                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2858                                 /* We have signal detect and are not
2859                                  * receiving config code words, so the link
2860                                  * is up by parallel detection.
2861                                  */
2862
2863                                 bmcr &= ~BMCR_ANENABLE;
2864                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2865                                 tg3_writephy(tp, MII_BMCR, bmcr);
2866                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2867                         }
2868                 }
2869         }
2870         else if (netif_carrier_ok(tp->dev) &&
2871                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2872                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2873                 u32 phy2;
2874
2875                 /* Select expansion interrupt status register */
2876                 tg3_writephy(tp, 0x17, 0x0f01);
2877                 tg3_readphy(tp, 0x15, &phy2);
2878                 if (phy2 & 0x20) {
2879                         u32 bmcr;
2880
2881                         /* Config code words received, turn on autoneg. */
2882                         tg3_readphy(tp, MII_BMCR, &bmcr);
2883                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2884
2885                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2886
2887                 }
2888         }
2889 }
2890
2891 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2892 {
2893         int err;
2894
2895         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2896                 err = tg3_setup_fiber_phy(tp, force_reset);
2897         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2898                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2899         } else {
2900                 err = tg3_setup_copper_phy(tp, force_reset);
2901         }
2902
2903         if (tp->link_config.active_speed == SPEED_1000 &&
2904             tp->link_config.active_duplex == DUPLEX_HALF)
2905                 tw32(MAC_TX_LENGTHS,
2906                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2907                       (6 << TX_LENGTHS_IPG_SHIFT) |
2908                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2909         else
2910                 tw32(MAC_TX_LENGTHS,
2911                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2912                       (6 << TX_LENGTHS_IPG_SHIFT) |
2913                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2914
2915         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2916                 if (netif_carrier_ok(tp->dev)) {
2917                         tw32(HOSTCC_STAT_COAL_TICKS,
2918                              tp->coal.stats_block_coalesce_usecs);
2919                 } else {
2920                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2921                 }
2922         }
2923
2924         return err;
2925 }
2926
2927 /* This is called whenever we suspect that the system chipset is re-
2928  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2929  * is bogus tx completions. We try to recover by setting the
2930  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2931  * in the workqueue.
2932  */
2933 static void tg3_tx_recover(struct tg3 *tp)
2934 {
2935         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2936                tp->write32_tx_mbox == tg3_write_indirect_mbox);
2937
2938         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2939                "mapped I/O cycles to the network device, attempting to "
2940                "recover. Please report the problem to the driver maintainer "
2941                "and include system chipset information.\n", tp->dev->name);
2942
2943         spin_lock(&tp->lock);
2944         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2945         spin_unlock(&tp->lock);
2946 }
2947
2948 static inline u32 tg3_tx_avail(struct tg3 *tp)
2949 {
2950         smp_mb();
2951         return (tp->tx_pending -
2952                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2953 }
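
/* Editor's note: a minimal, standalone sketch (not part of the driver) of the
 * masked-subtraction idiom used by tg3_tx_avail() above.  RING_SIZE stands in
 * for TG3_TX_RING_SIZE and, like it, must be a power of two so that the mask
 * handles producer/consumer wrap-around; the values below are invented.
 *
 * #include <stdio.h>
 *
 * #define RING_SIZE 512
 *
 * static unsigned int ring_avail(unsigned int pending,
 *                                unsigned int prod, unsigned int cons)
 * {
 *         // occupancy is (prod - cons) modulo the ring size
 *         return pending - ((prod - cons) & (RING_SIZE - 1));
 * }
 *
 * int main(void)
 * {
 *         // producer wrapped past the ring end: 512 - 508 + 5 = 9 in flight
 *         printf("%u free\n", ring_avail(200, 5, 508));   // prints "191 free"
 *         return 0;
 * }
 */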
2954
2955 /* Tigon3 never reports partial packet sends.  So we do not
2956  * need special logic to handle SKBs that have not had all
2957  * of their frags sent yet, like SunGEM does.
2958  */
2959 static void tg3_tx(struct tg3 *tp)
2960 {
2961         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2962         u32 sw_idx = tp->tx_cons;
2963
2964         while (sw_idx != hw_idx) {
2965                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2966                 struct sk_buff *skb = ri->skb;
2967                 int i, tx_bug = 0;
2968
2969                 if (unlikely(skb == NULL)) {
2970                         tg3_tx_recover(tp);
2971                         return;
2972                 }
2973
2974                 pci_unmap_single(tp->pdev,
2975                                  pci_unmap_addr(ri, mapping),
2976                                  skb_headlen(skb),
2977                                  PCI_DMA_TODEVICE);
2978
2979                 ri->skb = NULL;
2980
2981                 sw_idx = NEXT_TX(sw_idx);
2982
2983                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2984                         ri = &tp->tx_buffers[sw_idx];
2985                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
2986                                 tx_bug = 1;
2987
2988                         pci_unmap_page(tp->pdev,
2989                                        pci_unmap_addr(ri, mapping),
2990                                        skb_shinfo(skb)->frags[i].size,
2991                                        PCI_DMA_TODEVICE);
2992
2993                         sw_idx = NEXT_TX(sw_idx);
2994                 }
2995
2996                 dev_kfree_skb(skb);
2997
2998                 if (unlikely(tx_bug)) {
2999                         tg3_tx_recover(tp);
3000                         return;
3001                 }
3002         }
3003
3004         tp->tx_cons = sw_idx;
3005
3006         /* Need to make the tx_cons update visible to tg3_start_xmit()
3007          * before checking for netif_queue_stopped().  Without the
3008          * memory barrier, there is a small possibility that tg3_start_xmit()
3009          * will miss it and cause the queue to be stopped forever.
3010          */
3011         smp_mb();
3012
3013         if (unlikely(netif_queue_stopped(tp->dev) &&
3014                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
3015                 netif_tx_lock(tp->dev);
3016                 if (netif_queue_stopped(tp->dev) &&
3017                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
3018                         netif_wake_queue(tp->dev);
3019                 netif_tx_unlock(tp->dev);
3020         }
3021 }
3022
3023 /* Returns size of skb allocated or < 0 on error.
3024  *
3025  * We only need to fill in the address because the other members
3026  * of the RX descriptor are invariant, see tg3_init_rings.
3027  *
3028  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3029  * posting buffers we only dirty the first cache line of the RX
3030  * descriptor (containing the address).  Whereas for the RX status
3031  * buffers the cpu only reads the last cacheline of the RX descriptor
3032  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3033  */
3034 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3035                             int src_idx, u32 dest_idx_unmasked)
3036 {
3037         struct tg3_rx_buffer_desc *desc;
3038         struct ring_info *map, *src_map;
3039         struct sk_buff *skb;
3040         dma_addr_t mapping;
3041         int skb_size, dest_idx;
3042
3043         src_map = NULL;
3044         switch (opaque_key) {
3045         case RXD_OPAQUE_RING_STD:
3046                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3047                 desc = &tp->rx_std[dest_idx];
3048                 map = &tp->rx_std_buffers[dest_idx];
3049                 if (src_idx >= 0)
3050                         src_map = &tp->rx_std_buffers[src_idx];
3051                 skb_size = tp->rx_pkt_buf_sz;
3052                 break;
3053
3054         case RXD_OPAQUE_RING_JUMBO:
3055                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3056                 desc = &tp->rx_jumbo[dest_idx];
3057                 map = &tp->rx_jumbo_buffers[dest_idx];
3058                 if (src_idx >= 0)
3059                         src_map = &tp->rx_jumbo_buffers[src_idx];
3060                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3061                 break;
3062
3063         default:
3064                 return -EINVAL;
3065         }
3066
3067         /* Do not overwrite any of the map or rp information
3068          * until we are sure we can commit to a new buffer.
3069          *
3070          * Callers depend upon this behavior and assume that
3071          * we leave everything unchanged if we fail.
3072          */
3073         skb = netdev_alloc_skb(tp->dev, skb_size);
3074         if (skb == NULL)
3075                 return -ENOMEM;
3076
3077         skb_reserve(skb, tp->rx_offset);
3078
3079         mapping = pci_map_single(tp->pdev, skb->data,
3080                                  skb_size - tp->rx_offset,
3081                                  PCI_DMA_FROMDEVICE);
3082
3083         map->skb = skb;
3084         pci_unmap_addr_set(map, mapping, mapping);
3085
3086         if (src_map != NULL)
3087                 src_map->skb = NULL;
3088
3089         desc->addr_hi = ((u64)mapping >> 32);
3090         desc->addr_lo = ((u64)mapping & 0xffffffff);
3091
3092         return skb_size;
3093 }
3094
3095 /* We only need to move over the address because the other
3096  * members of the RX descriptor are invariant.  See notes above
3097  * tg3_alloc_rx_skb for full details.
3098  */
3099 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3100                            int src_idx, u32 dest_idx_unmasked)
3101 {
3102         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3103         struct ring_info *src_map, *dest_map;
3104         int dest_idx;
3105
3106         switch (opaque_key) {
3107         case RXD_OPAQUE_RING_STD:
3108                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3109                 dest_desc = &tp->rx_std[dest_idx];
3110                 dest_map = &tp->rx_std_buffers[dest_idx];
3111                 src_desc = &tp->rx_std[src_idx];
3112                 src_map = &tp->rx_std_buffers[src_idx];
3113                 break;
3114
3115         case RXD_OPAQUE_RING_JUMBO:
3116                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3117                 dest_desc = &tp->rx_jumbo[dest_idx];
3118                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3119                 src_desc = &tp->rx_jumbo[src_idx];
3120                 src_map = &tp->rx_jumbo_buffers[src_idx];
3121                 break;
3122
3123         default:
3124                 return;
3125         }
3126
3127         dest_map->skb = src_map->skb;
3128         pci_unmap_addr_set(dest_map, mapping,
3129                            pci_unmap_addr(src_map, mapping));
3130         dest_desc->addr_hi = src_desc->addr_hi;
3131         dest_desc->addr_lo = src_desc->addr_lo;
3132
3133         src_map->skb = NULL;
3134 }
3135
3136 #if TG3_VLAN_TAG_USED
3137 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3138 {
3139         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3140 }
3141 #endif
3142
3143 /* The RX ring scheme is composed of multiple rings which post fresh
3144  * buffers to the chip, and one special ring the chip uses to report
3145  * status back to the host.
3146  *
3147  * The special ring reports the status of received packets to the
3148  * host.  The chip does not write into the original descriptor the
3149  * RX buffer was obtained from.  The chip simply takes the original
3150  * descriptor as provided by the host, updates the status and length
3151  * field, then writes this into the next status ring entry.
3152  *
3153  * Each ring the host uses to post buffers to the chip is described
3154  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3155  * it is first placed into the on-chip ram.  When the packet's length
3156  * is known, it walks down the TG3_BDINFO entries to select the ring.
3157  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3158  * whose MAXLEN can accommodate the new packet's length is chosen.
3159  *
3160  * The "separate ring for rx status" scheme may sound queer, but it makes
3161  * sense from a cache coherency perspective.  If only the host writes
3162  * to the buffer post rings, and only the chip writes to the rx status
3163  * rings, then cache lines never move beyond shared-modified state.
3164  * If both the host and chip were to write into the same ring, cache line
3165  * eviction could occur since both entities want it in an exclusive state.
3166  */
3167 static int tg3_rx(struct tg3 *tp, int budget)
3168 {
3169         u32 work_mask, rx_std_posted = 0;
3170         u32 sw_idx = tp->rx_rcb_ptr;
3171         u16 hw_idx;
3172         int received;
3173
3174         hw_idx = tp->hw_status->idx[0].rx_producer;
3175         /*
3176          * We need to order the read of hw_idx and the read of
3177          * the opaque cookie.
3178          */
3179         rmb();
3180         work_mask = 0;
3181         received = 0;
3182         while (sw_idx != hw_idx && budget > 0) {
3183                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3184                 unsigned int len;
3185                 struct sk_buff *skb;
3186                 dma_addr_t dma_addr;
3187                 u32 opaque_key, desc_idx, *post_ptr;
3188
3189                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3190                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3191                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3192                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3193                                                   mapping);
3194                         skb = tp->rx_std_buffers[desc_idx].skb;
3195                         post_ptr = &tp->rx_std_ptr;
3196                         rx_std_posted++;
3197                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3198                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3199                                                   mapping);
3200                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3201                         post_ptr = &tp->rx_jumbo_ptr;
3202                 }
3203                 else {
3204                         goto next_pkt_nopost;
3205                 }
3206
3207                 work_mask |= opaque_key;
3208
3209                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3210                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3211                 drop_it:
3212                         tg3_recycle_rx(tp, opaque_key,
3213                                        desc_idx, *post_ptr);
3214                 drop_it_no_recycle:
3215                         /* Other statistics kept track of by card. */
3216                         tp->net_stats.rx_dropped++;
3217                         goto next_pkt;
3218                 }
3219
3220                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3221
3222                 if (len > RX_COPY_THRESHOLD
3223                         && tp->rx_offset == 2
3224                         /* rx_offset != 2 iff this is a 5701 card running
3225                          * in PCI-X mode [see tg3_get_invariants()] */
3226                 ) {
3227                         int skb_size;
3228
3229                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3230                                                     desc_idx, *post_ptr);
3231                         if (skb_size < 0)
3232                                 goto drop_it;
3233
3234                         pci_unmap_single(tp->pdev, dma_addr,
3235                                          skb_size - tp->rx_offset,
3236                                          PCI_DMA_FROMDEVICE);
3237
3238                         skb_put(skb, len);
3239                 } else {
3240                         struct sk_buff *copy_skb;
3241
3242                         tg3_recycle_rx(tp, opaque_key,
3243                                        desc_idx, *post_ptr);
3244
3245                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3246                         if (copy_skb == NULL)
3247                                 goto drop_it_no_recycle;
3248
3249                         skb_reserve(copy_skb, 2);
3250                         skb_put(copy_skb, len);
3251                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3252                         memcpy(copy_skb->data, skb->data, len);
3253                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3254
3255                         /* We'll reuse the original ring buffer. */
3256                         skb = copy_skb;
3257                 }
3258
3259                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3260                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3261                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3262                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3263                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3264                 else
3265                         skb->ip_summed = CHECKSUM_NONE;
3266
3267                 skb->protocol = eth_type_trans(skb, tp->dev);
3268 #if TG3_VLAN_TAG_USED
3269                 if (tp->vlgrp != NULL &&
3270                     desc->type_flags & RXD_FLAG_VLAN) {
3271                         tg3_vlan_rx(tp, skb,
3272                                     desc->err_vlan & RXD_VLAN_MASK);
3273                 } else
3274 #endif
3275                         netif_receive_skb(skb);
3276
3277                 tp->dev->last_rx = jiffies;
3278                 received++;
3279                 budget--;
3280
3281 next_pkt:
3282                 (*post_ptr)++;
3283
3284                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3285                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3286
3287                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3288                                      TG3_64BIT_REG_LOW, idx);
3289                         work_mask &= ~RXD_OPAQUE_RING_STD;
3290                         rx_std_posted = 0;
3291                 }
3292 next_pkt_nopost:
3293                 sw_idx++;
3294                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3295
3296                 /* Refresh hw_idx to see if there is new work */
3297                 if (sw_idx == hw_idx) {
3298                         hw_idx = tp->hw_status->idx[0].rx_producer;
3299                         rmb();
3300                 }
3301         }
3302
3303         /* ACK the status ring. */
3304         tp->rx_rcb_ptr = sw_idx;
3305         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3306
3307         /* Refill RX ring(s). */
3308         if (work_mask & RXD_OPAQUE_RING_STD) {
3309                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3310                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3311                              sw_idx);
3312         }
3313         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3314                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3315                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3316                              sw_idx);
3317         }
3318         mmiowb();
3319
3320         return received;
3321 }
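
/* Editor's note: a standalone sketch (not driver code) of the status-ring
 * consumer pattern described in the comment above tg3_rx(): the chip advances
 * a producer index, the host walks a software index up to it, and then
 * acknowledges the new consumer index with a mailbox write.  RCB_RING_SIZE
 * and the producer value are illustrative stand-ins.
 *
 * #include <stdio.h>
 *
 * #define RCB_RING_SIZE 1024
 *
 * int main(void)
 * {
 *         unsigned int hw_producer = 5;   // pretend the chip posted 5 entries
 *         unsigned int sw_consumer = 0;
 *
 *         while (sw_consumer != hw_producer) {
 *                 // here tg3_rx() inspects rx_rcb[sw_consumer] and either
 *                 // passes the buffer up the stack or recycles it
 *                 sw_consumer = (sw_consumer + 1) % RCB_RING_SIZE;
 *         }
 *         printf("ack consumer %u via MAILBOX_RCVRET_CON_IDX_0\n", sw_consumer);
 *         return 0;
 * }
 */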
3322
3323 static int tg3_poll(struct net_device *netdev, int *budget)
3324 {
3325         struct tg3 *tp = netdev_priv(netdev);
3326         struct tg3_hw_status *sblk = tp->hw_status;
3327         int done;
3328
3329         /* handle link change and other phy events */
3330         if (!(tp->tg3_flags &
3331               (TG3_FLAG_USE_LINKCHG_REG |
3332                TG3_FLAG_POLL_SERDES))) {
3333                 if (sblk->status & SD_STATUS_LINK_CHG) {
3334                         sblk->status = SD_STATUS_UPDATED |
3335                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3336                         spin_lock(&tp->lock);
3337                         tg3_setup_phy(tp, 0);
3338                         spin_unlock(&tp->lock);
3339                 }
3340         }
3341
3342         /* run TX completion thread */
3343         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3344                 tg3_tx(tp);
3345                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3346                         netif_rx_complete(netdev);
3347                         schedule_work(&tp->reset_task);
3348                         return 0;
3349                 }
3350         }
3351
3352         /* run RX thread, within the bounds set by NAPI.
3353          * All RX "locking" is done by ensuring outside
3354          * code synchronizes with dev->poll()
3355          */
3356         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3357                 int orig_budget = *budget;
3358                 int work_done;
3359
3360                 if (orig_budget > netdev->quota)
3361                         orig_budget = netdev->quota;
3362
3363                 work_done = tg3_rx(tp, orig_budget);
3364
3365                 *budget -= work_done;
3366                 netdev->quota -= work_done;
3367         }
3368
3369         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3370                 tp->last_tag = sblk->status_tag;
3371                 rmb();
3372         } else
3373                 sblk->status &= ~SD_STATUS_UPDATED;
3374
3375         /* if no more work, tell net stack and NIC we're done */
3376         done = !tg3_has_work(tp);
3377         if (done) {
3378                 netif_rx_complete(netdev);
3379                 tg3_restart_ints(tp);
3380         }
3381
3382         return (done ? 0 : 1);
3383 }
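
/* Editor's note: a hedged sketch of the dev->poll() contract that tg3_poll()
 * above follows (old-style NAPI, as used throughout this driver).
 * example_rx() is a hypothetical stand-in for the RX work and the
 * "work_done < limit" doneness test is a simplification of tg3_has_work();
 * the other calls mirror the ones made above.
 *
 * static int example_poll(struct net_device *dev, int *budget)
 * {
 *         int limit = min(*budget, dev->quota);
 *         int work_done = example_rx(dev, limit);   // hypothetical helper
 *
 *         *budget -= work_done;                     // charge the global budget
 *         dev->quota -= work_done;                  // and this device's quota
 *
 *         if (work_done < limit) {                  // no more work pending
 *                 netif_rx_complete(dev);           // leave the poll list
 *                 return 0;
 *         }
 *         return 1;                                 // ask to be polled again
 * }
 */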
3384
3385 static void tg3_irq_quiesce(struct tg3 *tp)
3386 {
3387         BUG_ON(tp->irq_sync);
3388
3389         tp->irq_sync = 1;
3390         smp_mb();
3391
3392         synchronize_irq(tp->pdev->irq);
3393 }
3394
3395 static inline int tg3_irq_sync(struct tg3 *tp)
3396 {
3397         return tp->irq_sync;
3398 }
3399
3400 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3401  * If irq_sync is non-zero, then we must also synchronize with the IRQ
3402  * handler.  Most of the time, this is not necessary except when
3403  * shutting down the device.
3404  */
3405 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3406 {
3407         if (irq_sync)
3408                 tg3_irq_quiesce(tp);
3409         spin_lock_bh(&tp->lock);
3410 }
3411
3412 static inline void tg3_full_unlock(struct tg3 *tp)
3413 {
3414         spin_unlock_bh(&tp->lock);
3415 }
3416
3417 /* One-shot MSI handler - Chip automatically disables interrupt
3418  * after sending MSI so driver doesn't have to do it.
3419  */
3420 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3421 {
3422         struct net_device *dev = dev_id;
3423         struct tg3 *tp = netdev_priv(dev);
3424
3425         prefetch(tp->hw_status);
3426         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3427
3428         if (likely(!tg3_irq_sync(tp)))
3429                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3430
3431         return IRQ_HANDLED;
3432 }
3433
3434 /* MSI ISR - No need to check for interrupt sharing and no need to
3435  * flush status block and interrupt mailbox. PCI ordering rules
3436  * guarantee that MSI will arrive after the status block.
3437  */
3438 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3439 {
3440         struct net_device *dev = dev_id;
3441         struct tg3 *tp = netdev_priv(dev);
3442
3443         prefetch(tp->hw_status);
3444         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3445         /*
3446          * Writing any value to intr-mbox-0 clears PCI INTA# and
3447          * chip-internal interrupt pending events.
3448          * Writing non-zero to intr-mbox-0 additionally tells the
3449          * NIC to stop sending us irqs, engaging "in-intr-handler"
3450          * event coalescing.
3451          */
3452         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3453         if (likely(!tg3_irq_sync(tp)))
3454                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3455
3456         return IRQ_RETVAL(1);
3457 }
3458
3459 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3460 {
3461         struct net_device *dev = dev_id;
3462         struct tg3 *tp = netdev_priv(dev);
3463         struct tg3_hw_status *sblk = tp->hw_status;
3464         unsigned int handled = 1;
3465
3466         /* In INTx mode, it is possible for the interrupt to arrive at
3467          * the CPU before the status block posted prior to it is visible.
3468          * Reading the PCI State register will confirm whether the
3469          * interrupt is ours and will flush the status block.
3470          */
3471         if ((sblk->status & SD_STATUS_UPDATED) ||
3472             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3473                 /*
3474                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3475                  * chip-internal interrupt pending events.
3476                  * Writing non-zero to intr-mbox-0 additionally tells the
3477                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3478                  * event coalescing.
3479                  */
3480                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3481                              0x00000001);
3482                 if (tg3_irq_sync(tp))
3483                         goto out;
3484                 sblk->status &= ~SD_STATUS_UPDATED;
3485                 if (likely(tg3_has_work(tp))) {
3486                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3487                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3488                 } else {
3489                         /* No work, shared interrupt perhaps?  re-enable
3490                          * interrupts, and flush that PCI write
3491                          */
3492                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3493                                 0x00000000);
3494                 }
3495         } else {        /* shared interrupt */
3496                 handled = 0;
3497         }
3498 out:
3499         return IRQ_RETVAL(handled);
3500 }
3501
3502 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3503 {
3504         struct net_device *dev = dev_id;
3505         struct tg3 *tp = netdev_priv(dev);
3506         struct tg3_hw_status *sblk = tp->hw_status;
3507         unsigned int handled = 1;
3508
3509         /* In INTx mode, it is possible for the interrupt to arrive at
3510          * the CPU before the status block posted prior to it is visible.
3511          * Reading the PCI State register will confirm whether the
3512          * interrupt is ours and will flush the status block.
3513          */
3514         if ((sblk->status_tag != tp->last_tag) ||
3515             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3516                 /*
3517                  * writing any value to intr-mbox-0 clears PCI INTA# and
3518                  * chip-internal interrupt pending events.
3519                  * writing non-zero to intr-mbox-0 additionally tells the
3520                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3521                  * event coalescing.
3522                  */
3523                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3524                              0x00000001);
3525                 if (tg3_irq_sync(tp))
3526                         goto out;
3527                 if (netif_rx_schedule_prep(dev)) {
3528                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3529                         /* Update last_tag to mark that this status has been
3530                          * seen. Because interrupt may be shared, we may be
3531                          * racing with tg3_poll(), so only update last_tag
3532                          * if tg3_poll() is not scheduled.
3533                          */
3534                         tp->last_tag = sblk->status_tag;
3535                         __netif_rx_schedule(dev);
3536                 }
3537         } else {        /* shared interrupt */
3538                 handled = 0;
3539         }
3540 out:
3541         return IRQ_RETVAL(handled);
3542 }
3543
3544 /* ISR for interrupt test */
3545 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3546                 struct pt_regs *regs)
3547 {
3548         struct net_device *dev = dev_id;
3549         struct tg3 *tp = netdev_priv(dev);
3550         struct tg3_hw_status *sblk = tp->hw_status;
3551
3552         if ((sblk->status & SD_STATUS_UPDATED) ||
3553             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3554                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3555                              0x00000001);
3556                 return IRQ_RETVAL(1);
3557         }
3558         return IRQ_RETVAL(0);
3559 }
3560
3561 static int tg3_init_hw(struct tg3 *, int);
3562 static int tg3_halt(struct tg3 *, int, int);
3563
3564 /* Restart hardware after configuration changes, self-test, etc.
3565  * Invoked with tp->lock held.
3566  */
3567 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3568 {
3569         int err;
3570
3571         err = tg3_init_hw(tp, reset_phy);
3572         if (err) {
3573                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3574                        "aborting.\n", tp->dev->name);
3575                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3576                 tg3_full_unlock(tp);
3577                 del_timer_sync(&tp->timer);
3578                 tp->irq_sync = 0;
3579                 netif_poll_enable(tp->dev);
3580                 dev_close(tp->dev);
3581                 tg3_full_lock(tp, 0);
3582         }
3583         return err;
3584 }
3585
3586 #ifdef CONFIG_NET_POLL_CONTROLLER
3587 static void tg3_poll_controller(struct net_device *dev)
3588 {
3589         struct tg3 *tp = netdev_priv(dev);
3590
3591         tg3_interrupt(tp->pdev->irq, dev, NULL);
3592 }
3593 #endif
3594
3595 static void tg3_reset_task(void *_data)
3596 {
3597         struct tg3 *tp = _data;
3598         unsigned int restart_timer;
3599
3600         tg3_full_lock(tp, 0);
3601         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3602
3603         if (!netif_running(tp->dev)) {
3604                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3605                 tg3_full_unlock(tp);
3606                 return;
3607         }
3608
3609         tg3_full_unlock(tp);
3610
3611         tg3_netif_stop(tp);
3612
3613         tg3_full_lock(tp, 1);
3614
3615         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3616         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3617
3618         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3619                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3620                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3621                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3622                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3623         }
3624
3625         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3626         if (tg3_init_hw(tp, 1))
3627                 goto out;
3628
3629         tg3_netif_start(tp);
3630
3631         if (restart_timer)
3632                 mod_timer(&tp->timer, jiffies + 1);
3633
3634 out:
3635         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3636
3637         tg3_full_unlock(tp);
3638 }
3639
3640 static void tg3_tx_timeout(struct net_device *dev)
3641 {
3642         struct tg3 *tp = netdev_priv(dev);
3643
3644         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3645                dev->name);
3646
3647         schedule_work(&tp->reset_task);
3648 }
3649
3650 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3651 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3652 {
3653         u32 base = (u32) mapping & 0xffffffff;
3654
3655         return ((base > 0xffffdcc0) &&
3656                 (base + len + 8 < base));
3657 }
3658
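/* Editor's note: a standalone sketch (not driver code) of what the wrap test
 * above detects.  A buffer crosses a 4 GB boundary exactly when adding its
 * length (plus the 8-byte slack used above) to the low 32 bits of its bus
 * address overflows back past zero; the 0xffffdcc0 pre-check simply skips
 * buffers that start too far from a boundary to cross it.  Values invented.
 *
 * #include <stdio.h>
 * #include <stdint.h>
 *
 * static int crosses_4g(uint64_t mapping, int len)
 * {
 *         uint32_t base = (uint32_t)mapping;
 *
 *         return (base > 0xffffdcc0u) && ((uint32_t)(base + len + 8) < base);
 * }
 *
 * int main(void)
 * {
 *         // a 4 KB buffer starting 4 KB below the 8 GB mark wraps the low
 *         // 32 bits, so it straddles a 4 GB-multiple boundary: prints 1
 *         printf("%d\n", crosses_4g(0x1fffff000ull, 0x1000));
 *         // comfortably inside one 4 GB window: prints 0
 *         printf("%d\n", crosses_4g(0x010000000ull, 1500));
 *         return 0;
 * }
 */
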
3659 /* Test for DMA addresses > 40-bit */
3660 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3661                                           int len)
3662 {
3663 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3664         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3665                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3666         return 0;
3667 #else
3668         return 0;
3669 #endif
3670 }
3671
3672 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3673
3674 /* Work around 4GB and 40-bit hardware DMA bugs. */
3675 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3676                                        u32 last_plus_one, u32 *start,
3677                                        u32 base_flags, u32 mss)
3678 {
3679         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3680         dma_addr_t new_addr = 0;
3681         u32 entry = *start;
3682         int i, ret = 0;
3683
3684         if (!new_skb) {
3685                 ret = -1;
3686         } else {
3687                 /* New SKB is guaranteed to be linear. */
3688                 entry = *start;
3689                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3690                                           PCI_DMA_TODEVICE);
3691                 /* Make sure new skb does not cross any 4G boundaries.
3692                  * Drop the packet if it does.
3693                  */
3694                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3695                         ret = -1;
3696                         dev_kfree_skb(new_skb);
3697                         new_skb = NULL;
3698                 } else {
3699                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3700                                     base_flags, 1 | (mss << 1));
3701                         *start = NEXT_TX(entry);
3702                 }
3703         }
3704
3705         /* Now clean up the sw ring entries. */
3706         i = 0;
3707         while (entry != last_plus_one) {
3708                 int len;
3709
3710                 if (i == 0)
3711                         len = skb_headlen(skb);
3712                 else
3713                         len = skb_shinfo(skb)->frags[i-1].size;
3714                 pci_unmap_single(tp->pdev,
3715                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3716                                  len, PCI_DMA_TODEVICE);
3717                 if (i == 0) {
3718                         tp->tx_buffers[entry].skb = new_skb;
3719                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3720                 } else {
3721                         tp->tx_buffers[entry].skb = NULL;
3722                 }
3723                 entry = NEXT_TX(entry);
3724                 i++;
3725         }
3726
3727         dev_kfree_skb(skb);
3728
3729         return ret;
3730 }
3731
3732 static void tg3_set_txd(struct tg3 *tp, int entry,
3733                         dma_addr_t mapping, int len, u32 flags,
3734                         u32 mss_and_is_end)
3735 {
3736         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3737         int is_end = (mss_and_is_end & 0x1);
3738         u32 mss = (mss_and_is_end >> 1);
3739         u32 vlan_tag = 0;
3740
3741         if (is_end)
3742                 flags |= TXD_FLAG_END;
3743         if (flags & TXD_FLAG_VLAN) {
3744                 vlan_tag = flags >> 16;
3745                 flags &= 0xffff;
3746         }
3747         vlan_tag |= (mss << TXD_MSS_SHIFT);
3748
3749         txd->addr_hi = ((u64) mapping >> 32);
3750         txd->addr_lo = ((u64) mapping & 0xffffffff);
3751         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3752         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3753 }
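
/* Editor's note: a standalone sketch (values invented) of how the caller's
 * mss_and_is_end argument and the 64-bit DMA address map onto the descriptor
 * fields that tg3_set_txd() above fills in.
 *
 * #include <stdio.h>
 * #include <stdint.h>
 *
 * int main(void)
 * {
 *         uint64_t mapping = 0x00000001fee1d000ull;   // hypothetical bus address
 *         uint32_t mss = 1448;                        // hypothetical TSO segment size
 *         uint32_t mss_and_is_end = 1u | (mss << 1);  // bit 0: last descriptor of the skb
 *
 *         printf("addr_hi=0x%08x addr_lo=0x%08x\n",
 *                (unsigned int)(mapping >> 32),
 *                (unsigned int)(mapping & 0xffffffff));
 *         printf("is_end=%u mss=%u\n",
 *                (unsigned int)(mss_and_is_end & 0x1u),
 *                (unsigned int)(mss_and_is_end >> 1));
 *         return 0;
 * }
 */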
3754
3755 /* hard_start_xmit for devices that don't have any bugs and
3756  * support TG3_FLG2_HW_TSO_2 only.
3757  */
3758 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3759 {
3760         struct tg3 *tp = netdev_priv(dev);
3761         dma_addr_t mapping;
3762         u32 len, entry, base_flags, mss;
3763
3764         len = skb_headlen(skb);
3765
3766         /* We are running in BH disabled context with netif_tx_lock
3767          * and TX reclaim runs via tp->poll inside of a software
3768          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3769          * no IRQ context deadlocks to worry about either.  Rejoice!
3770          */
3771         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3772                 if (!netif_queue_stopped(dev)) {
3773                         netif_stop_queue(dev);
3774
3775                         /* This is a hard error, log it. */
3776                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3777                                "queue awake!\n", dev->name);
3778                 }
3779                 return NETDEV_TX_BUSY;
3780         }
3781
3782         entry = tp->tx_prod;
3783         base_flags = 0;
3784 #if TG3_TSO_SUPPORT != 0
3785         mss = 0;
3786         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3787             (mss = skb_shinfo(skb)->gso_size) != 0) {
3788                 int tcp_opt_len, ip_tcp_len;
3789
3790                 if (skb_header_cloned(skb) &&
3791                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3792                         dev_kfree_skb(skb);
3793                         goto out_unlock;
3794                 }
3795
3796                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3797                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3798                 else {
3799                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3800                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3801                                      sizeof(struct tcphdr);
3802
3803                         skb->nh.iph->check = 0;
3804                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3805                                                      tcp_opt_len);
3806                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3807                 }
3808
3809                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3810                                TXD_FLAG_CPU_POST_DMA);
3811
3812                 skb->h.th->check = 0;
3813
3814         }
3815         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3816                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3817 #else
3818         mss = 0;
3819         if (skb->ip_summed == CHECKSUM_PARTIAL)
3820                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3821 #endif
3822 #if TG3_VLAN_TAG_USED
3823         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3824                 base_flags |= (TXD_FLAG_VLAN |
3825                                (vlan_tx_tag_get(skb) << 16));
3826 #endif
3827
3828         /* Queue skb data, a.k.a. the main skb fragment. */
3829         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3830
3831         tp->tx_buffers[entry].skb = skb;
3832         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3833
3834         tg3_set_txd(tp, entry, mapping, len, base_flags,
3835                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3836
3837         entry = NEXT_TX(entry);
3838
3839         /* Now loop through additional data fragments, and queue them. */
3840         if (skb_shinfo(skb)->nr_frags > 0) {
3841                 unsigned int i, last;
3842
3843                 last = skb_shinfo(skb)->nr_frags - 1;
3844                 for (i = 0; i <= last; i++) {
3845                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3846
3847                         len = frag->size;
3848                         mapping = pci_map_page(tp->pdev,
3849                                                frag->page,
3850                                                frag->page_offset,
3851                                                len, PCI_DMA_TODEVICE);
3852
3853                         tp->tx_buffers[entry].skb = NULL;
3854                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3855
3856                         tg3_set_txd(tp, entry, mapping, len,
3857                                     base_flags, (i == last) | (mss << 1));
3858
3859                         entry = NEXT_TX(entry);
3860                 }
3861         }
3862
3863         /* Packets are ready, update Tx producer idx local and on card. */
3864         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3865
3866         tp->tx_prod = entry;
3867         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3868                 netif_stop_queue(dev);
3869                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3870                         netif_wake_queue(tp->dev);
3871         }
3872
3873 out_unlock:
3874         mmiowb();
3875
3876         dev->trans_start = jiffies;
3877
3878         return NETDEV_TX_OK;
3879 }
3880
3881 #if TG3_TSO_SUPPORT != 0
3882 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3883
3884 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3885  * TSO header is greater than 80 bytes.
3886  */
3887 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3888 {
3889         struct sk_buff *segs, *nskb;
3890
3891         /* Estimate the number of fragments in the worst case */
3892         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3893                 netif_stop_queue(tp->dev);
3894                 return NETDEV_TX_BUSY;
3895         }
3896
3897         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3898         if (unlikely(IS_ERR(segs)))
3899                 goto tg3_tso_bug_end;
3900
3901         do {
3902                 nskb = segs;
3903                 segs = segs->next;
3904                 nskb->next = NULL;
3905                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3906         } while (segs);
3907
3908 tg3_tso_bug_end:
3909         dev_kfree_skb(skb);
3910
3911         return NETDEV_TX_OK;
3912 }
3913 #endif
3914
3915 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3916  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3917  */
3918 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3919 {
3920         struct tg3 *tp = netdev_priv(dev);
3921         dma_addr_t mapping;
3922         u32 len, entry, base_flags, mss;
3923         int would_hit_hwbug;
3924
3925         len = skb_headlen(skb);
3926
3927         /* We are running in BH disabled context with netif_tx_lock
3928          * and TX reclaim runs via tp->poll inside of a software
3929          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3930          * no IRQ context deadlocks to worry about either.  Rejoice!
3931          */
3932         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3933                 if (!netif_queue_stopped(dev)) {
3934                         netif_stop_queue(dev);
3935
3936                         /* This is a hard error, log it. */
3937                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3938                                "queue awake!\n", dev->name);
3939                 }
3940                 return NETDEV_TX_BUSY;
3941         }
3942
3943         entry = tp->tx_prod;
3944         base_flags = 0;
3945         if (skb->ip_summed == CHECKSUM_PARTIAL)
3946                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3947 #if TG3_TSO_SUPPORT != 0
3948         mss = 0;
3949         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3950             (mss = skb_shinfo(skb)->gso_size) != 0) {
3951                 int tcp_opt_len, ip_tcp_len, hdr_len;
3952
3953                 if (skb_header_cloned(skb) &&
3954                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3955                         dev_kfree_skb(skb);
3956                         goto out_unlock;
3957                 }
3958
3959                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3960                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3961
3962                 hdr_len = ip_tcp_len + tcp_opt_len;
3963                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3964                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3965                         return (tg3_tso_bug(tp, skb));
3966
3967                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3968                                TXD_FLAG_CPU_POST_DMA);
3969
3970                 skb->nh.iph->check = 0;
3971                 skb->nh.iph->tot_len = htons(mss + hdr_len);
3972                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3973                         skb->h.th->check = 0;
3974                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3975                 }
3976                 else {
3977                         skb->h.th->check =
3978                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3979                                                    skb->nh.iph->daddr,
3980                                                    0, IPPROTO_TCP, 0);
3981                 }
3982
3983                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3984                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3985                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3986                                 int tsflags;
3987
3988                                 tsflags = ((skb->nh.iph->ihl - 5) +
3989                                            (tcp_opt_len >> 2));
3990                                 mss |= (tsflags << 11);
3991                         }
3992                 } else {
3993                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3994                                 int tsflags;
3995
3996                                 tsflags = ((skb->nh.iph->ihl - 5) +
3997                                            (tcp_opt_len >> 2));
3998                                 base_flags |= tsflags << 12;
3999                         }
4000                 }
4001         }
4002 #else
4003         mss = 0;
4004 #endif
4005 #if TG3_VLAN_TAG_USED
4006         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4007                 base_flags |= (TXD_FLAG_VLAN |
4008                                (vlan_tx_tag_get(skb) << 16));
4009 #endif
4010
4011         /* Queue skb data, a.k.a. the main skb fragment. */
4012         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4013
4014         tp->tx_buffers[entry].skb = skb;
4015         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4016
4017         would_hit_hwbug = 0;
4018
4019         if (tg3_4g_overflow_test(mapping, len))
4020                 would_hit_hwbug = 1;
4021
4022         tg3_set_txd(tp, entry, mapping, len, base_flags,
4023                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4024
4025         entry = NEXT_TX(entry);
4026
4027         /* Now loop through additional data fragments, and queue them. */
4028         if (skb_shinfo(skb)->nr_frags > 0) {
4029                 unsigned int i, last;
4030
4031                 last = skb_shinfo(skb)->nr_frags - 1;
4032                 for (i = 0; i <= last; i++) {
4033                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4034
4035                         len = frag->size;
4036                         mapping = pci_map_page(tp->pdev,
4037                                                frag->page,
4038                                                frag->page_offset,
4039                                                len, PCI_DMA_TODEVICE);
4040
4041                         tp->tx_buffers[entry].skb = NULL;
4042                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4043
4044                         if (tg3_4g_overflow_test(mapping, len))
4045                                 would_hit_hwbug = 1;
4046
4047                         if (tg3_40bit_overflow_test(tp, mapping, len))
4048                                 would_hit_hwbug = 1;
4049
4050                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4051                                 tg3_set_txd(tp, entry, mapping, len,
4052                                             base_flags, (i == last)|(mss << 1));
4053                         else
4054                                 tg3_set_txd(tp, entry, mapping, len,
4055                                             base_flags, (i == last));
4056
4057                         entry = NEXT_TX(entry);
4058                 }
4059         }
4060
4061         if (would_hit_hwbug) {
4062                 u32 last_plus_one = entry;
4063                 u32 start;
4064
4065                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4066                 start &= (TG3_TX_RING_SIZE - 1);
4067
4068                 /* If the workaround fails due to memory/mapping
4069                  * failure, silently drop this packet.
4070                  */
4071                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4072                                                 &start, base_flags, mss))
4073                         goto out_unlock;
4074
4075                 entry = start;
4076         }
4077
4078         /* Packets are ready, update Tx producer idx local and on card. */
4079         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4080
4081         tp->tx_prod = entry;
4082         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4083                 netif_stop_queue(dev);
4084                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
4085                         netif_wake_queue(tp->dev);
4086         }
4087
4088 out_unlock:
4089         mmiowb();
4090
4091         dev->trans_start = jiffies;
4092
4093         return NETDEV_TX_OK;
4094 }
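
/* Editor's note: a standalone arithmetic sketch (values invented) of the
 * 80-byte header check above, which decides when the firmware-TSO workaround
 * path (tg3_tso_bug) is taken on affected chips.
 *
 * #include <stdio.h>
 *
 * int main(void)
 * {
 *         int eth_hlen = 14;               // ETH_HLEN
 *         int ihl = 5;                     // IP header length in words, no options
 *         int doff = 15;                   // hypothetical: 40 bytes of TCP options
 *         int ip_tcp_len = ihl * 4 + 20;   // IP header plus fixed TCP header
 *         int tcp_opt_len = (doff - 5) * 4;
 *         int hdr_len = ip_tcp_len + tcp_opt_len;
 *
 *         // 14 + 40 + 40 = 94 > 80, so such a packet takes the GSO path
 *         printf("total header bytes: %d\n", eth_hlen + hdr_len);
 *         return 0;
 * }
 */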
4095
4096 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4097                                int new_mtu)
4098 {
4099         dev->mtu = new_mtu;
4100
4101         if (new_mtu > ETH_DATA_LEN) {
4102                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4103                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4104                         ethtool_op_set_tso(dev, 0);
4105                 }
4106                 else
4107                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4108         } else {
4109                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4110                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4111                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4112         }
4113 }
4114
4115 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4116 {
4117         struct tg3 *tp = netdev_priv(dev);
4118         int err;
4119
4120         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4121                 return -EINVAL;
4122
4123         if (!netif_running(dev)) {
4124                 /* We'll just catch it later when the
4125          * device is brought up.
4126                  */
4127                 tg3_set_mtu(dev, tp, new_mtu);
4128                 return 0;
4129         }
4130
4131         tg3_netif_stop(tp);
4132
4133         tg3_full_lock(tp, 1);
4134
4135         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4136
4137         tg3_set_mtu(dev, tp, new_mtu);
4138
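             /* tg3_restart_hw() below is expected to re-run tg3_init_rings()
              * (later in this file), which re-derives rx_pkt_buf_sz from the
              * new dev->mtu, so the freshly allocated rx SKBs match the new
              * size.
              */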
4139         err = tg3_restart_hw(tp, 0);
4140
4141         if (!err)
4142                 tg3_netif_start(tp);
4143
4144         tg3_full_unlock(tp);
4145
4146         return err;
4147 }
4148
4149 /* Free up pending packets in all rx/tx rings.
4150  *
4151  * The chip has been shut down and the driver detached from
4152  * the networking stack, so no interrupts or new tx packets will
4153  * end up in the driver.  tp->{tx,}lock is not held and we are not
4154  * in an interrupt context and thus may sleep.
4155  */
4156 static void tg3_free_rings(struct tg3 *tp)
4157 {
4158         struct ring_info *rxp;
4159         int i;
4160
4161         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4162                 rxp = &tp->rx_std_buffers[i];
4163
4164                 if (rxp->skb == NULL)
4165                         continue;
4166                 pci_unmap_single(tp->pdev,
4167                                  pci_unmap_addr(rxp, mapping),
4168                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4169                                  PCI_DMA_FROMDEVICE);
4170                 dev_kfree_skb_any(rxp->skb);
4171                 rxp->skb = NULL;
4172         }
4173
4174         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4175                 rxp = &tp->rx_jumbo_buffers[i];
4176
4177                 if (rxp->skb == NULL)
4178                         continue;
4179                 pci_unmap_single(tp->pdev,
4180                                  pci_unmap_addr(rxp, mapping),
4181                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4182                                  PCI_DMA_FROMDEVICE);
4183                 dev_kfree_skb_any(rxp->skb);
4184                 rxp->skb = NULL;
4185         }
4186
4187         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4188                 struct tx_ring_info *txp;
4189                 struct sk_buff *skb;
4190                 int j;
4191
4192                 txp = &tp->tx_buffers[i];
4193                 skb = txp->skb;
4194
4195                 if (skb == NULL) {
4196                         i++;
4197                         continue;
4198                 }
4199
4200                 pci_unmap_single(tp->pdev,
4201                                  pci_unmap_addr(txp, mapping),
4202                                  skb_headlen(skb),
4203                                  PCI_DMA_TODEVICE);
4204                 txp->skb = NULL;
4205
4206                 i++;
4207
4208                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4209                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4210                         pci_unmap_page(tp->pdev,
4211                                        pci_unmap_addr(txp, mapping),
4212                                        skb_shinfo(skb)->frags[j].size,
4213                                        PCI_DMA_TODEVICE);
4214                         i++;
4215                 }
4216
4217                 dev_kfree_skb_any(skb);
4218         }
4219 }
4220
4221 /* Initialize tx/rx rings for packet processing.
4222  *
4223  * The chip has been shut down and the driver detached from
4224  * the networking stack, so no interrupts or new tx packets will
4225  * end up in the driver.  tp->{tx,}lock are held and thus
4226  * we may not sleep.
4227  */
4228 static int tg3_init_rings(struct tg3 *tp)
4229 {
4230         u32 i;
4231
4232         /* Free up all the SKBs. */
4233         tg3_free_rings(tp);
4234
4235         /* Zero out all descriptors. */
4236         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4237         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4238         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4239         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4240
4241         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4242         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4243             (tp->dev->mtu > ETH_DATA_LEN))
4244                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4245
4246         /* Initialize invariants of the rings; we only set this
4247          * stuff once.  This works because the card does not
4248          * write into the rx buffer posting rings.
4249          */
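             /* Per-descriptor fields set below: idx_len carries the usable
              * buffer length (shifted by RXD_LEN_SHIFT), type_flags marks the
              * end of a buffer chain, and opaque encodes the ring type plus
              * the slot index so a completed descriptor can be traced back to
              * its ring_info entry.
              */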
4250         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4251                 struct tg3_rx_buffer_desc *rxd;
4252
4253                 rxd = &tp->rx_std[i];
4254                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4255                         << RXD_LEN_SHIFT;
4256                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4257                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4258                                (i << RXD_OPAQUE_INDEX_SHIFT));
4259         }
4260
4261         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4262                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4263                         struct tg3_rx_buffer_desc *rxd;
4264
4265                         rxd = &tp->rx_jumbo[i];
4266                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4267                                 << RXD_LEN_SHIFT;
4268                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4269                                 RXD_FLAG_JUMBO;
4270                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4271                                (i << RXD_OPAQUE_INDEX_SHIFT));
4272                 }
4273         }
4274
4275         /* Now allocate fresh SKBs for each rx ring. */
4276         for (i = 0; i < tp->rx_pending; i++) {
4277                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4278                         printk(KERN_WARNING PFX
4279                                "%s: Using a smaller RX standard ring, "
4280                                "only %d out of %d buffers were allocated "
4281                                "successfully.\n",
4282                                tp->dev->name, i, tp->rx_pending);
4283                         if (i == 0)
4284                                 return -ENOMEM;
4285                         tp->rx_pending = i;
4286                         break;
4287                 }
4288         }
4289
4290         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4291                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4292                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4293                                              -1, i) < 0) {
4294                                 printk(KERN_WARNING PFX
4295                                        "%s: Using a smaller RX jumbo ring, "
4296                                        "only %d out of %d buffers were "
4297                                        "allocated successfully.\n",
4298                                        tp->dev->name, i, tp->rx_jumbo_pending);
4299                                 if (i == 0) {
4300                                         tg3_free_rings(tp);
4301                                         return -ENOMEM;
4302                                 }
4303                                 tp->rx_jumbo_pending = i;
4304                                 break;
4305                         }
4306                 }
4307         }
4308         return 0;
4309 }
4310
4311 /*
4312  * Must not be invoked with interrupt sources disabled and
4313  * the hardware shut down.
4314  */
4315 static void tg3_free_consistent(struct tg3 *tp)
4316 {
4317         kfree(tp->rx_std_buffers);
4318         tp->rx_std_buffers = NULL;
4319         if (tp->rx_std) {
4320                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4321                                     tp->rx_std, tp->rx_std_mapping);
4322                 tp->rx_std = NULL;
4323         }
4324         if (tp->rx_jumbo) {
4325                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4326                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4327                 tp->rx_jumbo = NULL;
4328         }
4329         if (tp->rx_rcb) {
4330                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4331                                     tp->rx_rcb, tp->rx_rcb_mapping);
4332                 tp->rx_rcb = NULL;
4333         }
4334         if (tp->tx_ring) {
4335                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4336                         tp->tx_ring, tp->tx_desc_mapping);
4337                 tp->tx_ring = NULL;
4338         }
4339         if (tp->hw_status) {
4340                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4341                                     tp->hw_status, tp->status_mapping);
4342                 tp->hw_status = NULL;
4343         }
4344         if (tp->hw_stats) {
4345                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4346                                     tp->hw_stats, tp->stats_mapping);
4347                 tp->hw_stats = NULL;
4348         }
4349 }
4350
4351 /*
4352  * Must not be invoked with interrupt sources disabled and
4353  * the hardware shut down.  Can sleep.
4354  */
4355 static int tg3_alloc_consistent(struct tg3 *tp)
4356 {
4357         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4358                                       (TG3_RX_RING_SIZE +
4359                                        TG3_RX_JUMBO_RING_SIZE)) +
4360                                      (sizeof(struct tx_ring_info) *
4361                                       TG3_TX_RING_SIZE),
4362                                      GFP_KERNEL);
4363         if (!tp->rx_std_buffers)
4364                 return -ENOMEM;
4365
4366         memset(tp->rx_std_buffers, 0,
4367                (sizeof(struct ring_info) *
4368                 (TG3_RX_RING_SIZE +
4369                  TG3_RX_JUMBO_RING_SIZE)) +
4370                (sizeof(struct tx_ring_info) *
4371                 TG3_TX_RING_SIZE));
4372
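             /* The single kmalloc above holds all three bookkeeping arrays
              * back to back: TG3_RX_RING_SIZE ring_info entries for the
              * standard ring, TG3_RX_JUMBO_RING_SIZE for the jumbo ring, then
              * TG3_TX_RING_SIZE tx_ring_info entries.  Carve the jumbo and tx
              * pointers out of that block.
              */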
4373         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4374         tp->tx_buffers = (struct tx_ring_info *)
4375                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4376
4377         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4378                                           &tp->rx_std_mapping);
4379         if (!tp->rx_std)
4380                 goto err_out;
4381
4382         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4383                                             &tp->rx_jumbo_mapping);
4384
4385         if (!tp->rx_jumbo)
4386                 goto err_out;
4387
4388         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4389                                           &tp->rx_rcb_mapping);
4390         if (!tp->rx_rcb)
4391                 goto err_out;
4392
4393         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4394                                            &tp->tx_desc_mapping);
4395         if (!tp->tx_ring)
4396                 goto err_out;
4397
4398         tp->hw_status = pci_alloc_consistent(tp->pdev,
4399                                              TG3_HW_STATUS_SIZE,
4400                                              &tp->status_mapping);
4401         if (!tp->hw_status)
4402                 goto err_out;
4403
4404         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4405                                             sizeof(struct tg3_hw_stats),
4406                                             &tp->stats_mapping);
4407         if (!tp->hw_stats)
4408                 goto err_out;
4409
4410         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4411         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4412
4413         return 0;
4414
4415 err_out:
4416         tg3_free_consistent(tp);
4417         return -ENOMEM;
4418 }
4419
4420 #define MAX_WAIT_CNT 1000
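/* With the 100 usec poll interval used in tg3_stop_block() below, this bounds
 * each block stop at roughly 1000 * 100 usec = 100 ms of busy-waiting.
 */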
4421
4422 /* To stop a block, clear the enable bit and poll till it
4423  * clears.  tp->lock is held.
4424  */
4425 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4426 {
4427         unsigned int i;
4428         u32 val;
4429
4430         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4431                 switch (ofs) {
4432                 case RCVLSC_MODE:
4433                 case DMAC_MODE:
4434                 case MBFREE_MODE:
4435                 case BUFMGR_MODE:
4436                 case MEMARB_MODE:
4437                         /* We can't enable/disable these bits of the
4438                          * 5705/5750; just say success.
4439                          */
4440                         return 0;
4441
4442                 default:
4443                         break;
4444                 }
4445         }
4446
4447         val = tr32(ofs);
4448         val &= ~enable_bit;
4449         tw32_f(ofs, val);
4450
4451         for (i = 0; i < MAX_WAIT_CNT; i++) {
4452                 udelay(100);
4453                 val = tr32(ofs);
4454                 if ((val & enable_bit) == 0)
4455                         break;
4456         }
4457
4458         if (i == MAX_WAIT_CNT && !silent) {
4459                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4460                        "ofs=%lx enable_bit=%x\n",
4461                        ofs, enable_bit);
4462                 return -ENODEV;
4463         }
4464
4465         return 0;
4466 }
4467
4468 /* tp->lock is held. */
4469 static int tg3_abort_hw(struct tg3 *tp, int silent)
4470 {
4471         int i, err;
4472
4473         tg3_disable_ints(tp);
4474
4475         tp->rx_mode &= ~RX_MODE_ENABLE;
4476         tw32_f(MAC_RX_MODE, tp->rx_mode);
4477         udelay(10);
4478
4479         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4480         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4481         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4482         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4483         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4484         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4485
4486         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4487         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4488         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4489         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4490         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4491         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4492         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4493
4494         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4495         tw32_f(MAC_MODE, tp->mac_mode);
4496         udelay(40);
4497
4498         tp->tx_mode &= ~TX_MODE_ENABLE;
4499         tw32_f(MAC_TX_MODE, tp->tx_mode);
4500
4501         for (i = 0; i < MAX_WAIT_CNT; i++) {
4502                 udelay(100);
4503                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4504                         break;
4505         }
4506         if (i >= MAX_WAIT_CNT) {
4507                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4508                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4509                        tp->dev->name, tr32(MAC_TX_MODE));
4510                 err |= -ENODEV;
4511         }
4512
4513         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4514         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4515         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4516
4517         tw32(FTQ_RESET, 0xffffffff);
4518         tw32(FTQ_RESET, 0x00000000);
4519
4520         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4521         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4522
4523         if (tp->hw_status)
4524                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4525         if (tp->hw_stats)
4526                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4527
4528         return err;
4529 }
4530
4531 /* tp->lock is held. */
4532 static int tg3_nvram_lock(struct tg3 *tp)
4533 {
4534         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4535                 int i;
4536
4537                 if (tp->nvram_lock_cnt == 0) {
4538                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4539                         for (i = 0; i < 8000; i++) {
4540                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4541                                         break;
4542                                 udelay(20);
4543                         }
4544                         if (i == 8000) {
4545                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4546                                 return -ENODEV;
4547                         }
4548                 }
4549                 tp->nvram_lock_cnt++;
4550         }
4551         return 0;
4552 }
4553
4554 /* tp->lock is held. */
4555 static void tg3_nvram_unlock(struct tg3 *tp)
4556 {
4557         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4558                 if (tp->nvram_lock_cnt > 0)
4559                         tp->nvram_lock_cnt--;
4560                 if (tp->nvram_lock_cnt == 0)
4561                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4562         }
4563 }
4564
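/* Typical pairing, as a sketch (real callers also check the NVRAM accessors'
 * own return values):
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		... access NVRAM through the NVRAM_* registers ...
 *		tg3_nvram_unlock(tp);
 *	}
 *
 * nvram_lock_cnt lets nested callers share a single SWARB grant; only the
 * outermost unlock actually releases the hardware arbiter.
 */
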
4565 /* tp->lock is held. */
4566 static void tg3_enable_nvram_access(struct tg3 *tp)
4567 {
4568         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4569             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4570                 u32 nvaccess = tr32(NVRAM_ACCESS);
4571
4572                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4573         }
4574 }
4575
4576 /* tp->lock is held. */
4577 static void tg3_disable_nvram_access(struct tg3 *tp)
4578 {
4579         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4580             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4581                 u32 nvaccess = tr32(NVRAM_ACCESS);
4582
4583                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4584         }
4585 }
4586
4587 /* tp->lock is held. */
4588 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4589 {
4590         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4591                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4592
4593         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4594                 switch (kind) {
4595                 case RESET_KIND_INIT:
4596                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4597                                       DRV_STATE_START);
4598                         break;
4599
4600                 case RESET_KIND_SHUTDOWN:
4601                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4602                                       DRV_STATE_UNLOAD);
4603                         break;
4604
4605                 case RESET_KIND_SUSPEND:
4606                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4607                                       DRV_STATE_SUSPEND);
4608                         break;
4609
4610                 default:
4611                         break;
4612                 }
4613         }
4614 }
4615
4616 /* tp->lock is held. */
4617 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4618 {
4619         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4620                 switch (kind) {
4621                 case RESET_KIND_INIT:
4622                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4623                                       DRV_STATE_START_DONE);
4624                         break;
4625
4626                 case RESET_KIND_SHUTDOWN:
4627                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4628                                       DRV_STATE_UNLOAD_DONE);
4629                         break;
4630
4631                 default:
4632                         break;
4633                 }
4634         }
4635 }
4636
4637 /* tp->lock is held. */
4638 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4639 {
4640         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4641                 switch (kind) {
4642                 case RESET_KIND_INIT:
4643                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4644                                       DRV_STATE_START);
4645                         break;
4646
4647                 case RESET_KIND_SHUTDOWN:
4648                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4649                                       DRV_STATE_UNLOAD);
4650                         break;
4651
4652                 case RESET_KIND_SUSPEND:
4653                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4654                                       DRV_STATE_SUSPEND);
4655                         break;
4656
4657                 default:
4658                         break;
4659                 }
4660         }
4661 }
4662
4663 static void tg3_stop_fw(struct tg3 *);
4664
4665 /* tp->lock is held. */
4666 static int tg3_chip_reset(struct tg3 *tp)
4667 {
4668         u32 val;
4669         void (*write_op)(struct tg3 *, u32, u32);
4670         int i;
4671
4672         tg3_nvram_lock(tp);
4673
4674         /* No matching tg3_nvram_unlock() after this because
4675          * chip reset below will undo the nvram lock.
4676          */
4677         tp->nvram_lock_cnt = 0;
4678
4679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4682                 tw32(GRC_FASTBOOT_PC, 0);
4683
4684         /*
4685          * We must avoid the readl() that normally takes place.
4686          * It locks machines, causes machine checks, and does other
4687          * fun things.  So, temporarily disable the 5701
4688          * hardware workaround, while we do the reset.
4689          */
4690         write_op = tp->write32;
4691         if (write_op == tg3_write_flush_reg32)
4692                 tp->write32 = tg3_write32;
4693
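             /* tg3_write_flush_reg32() pairs each writel() with a readl() of
              * the same register to push the posted write out; it is that
              * read-back which can wedge the bus across the reset, so the
              * plain tg3_write32() is used for the duration and restored
              * further down.
              */
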
4694         /* do the reset */
4695         val = GRC_MISC_CFG_CORECLK_RESET;
4696
4697         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4698                 if (tr32(0x7e2c) == 0x60) {
4699                         tw32(0x7e2c, 0x20);
4700                 }
4701                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4702                         tw32(GRC_MISC_CFG, (1 << 29));
4703                         val |= (1 << 29);
4704                 }
4705         }
4706
4707         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4708                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4709         tw32(GRC_MISC_CFG, val);
4710
4711         /* restore 5701 hardware bug workaround write method */
4712         tp->write32 = write_op;
4713
4714         /* Unfortunately, we have to delay before the PCI read back.
4715          * Some 575X chips will not even respond to a PCI cfg access
4716          * when the reset command is given to the chip.
4717          *
4718          * How do these hardware designers expect things to work
4719          * properly if the PCI write is posted for a long period
4720          * of time?  It is always necessary to have some method by
4721          * which a register read back can occur to push the write
4722          * which a register read back can occur to push out the write
4723          * that does the reset.
4724          * For most tg3 variants the trick below was working.
4725          * Ho hum...
4726          */
4727         udelay(120);
4728
4729         /* Flush PCI posted writes.  The normal MMIO registers
4730          * are inaccessible at this time so this is the only
4731          * way to do this reliably (actually, this is no longer
4732          * the case, see above).  I tried to use indirect
4733          * register read/write but this upset some 5701 variants.
4734          */
4735         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4736
4737         udelay(120);
4738
4739         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4740                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4741                         int i;
4742                         u32 cfg_val;
4743
4744                         /* Wait for link training to complete.  */
4745                         for (i = 0; i < 5000; i++)
4746                                 udelay(100);
4747
4748                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4749                         pci_write_config_dword(tp->pdev, 0xc4,
4750                                                cfg_val | (1 << 15));
4751                 }
4752                 /* Set PCIE max payload size and clear error status.  */
4753                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4754         }
4755
4756         /* Re-enable indirect register accesses. */
4757         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4758                                tp->misc_host_ctrl);
4759
4760         /* Set MAX PCI retry to zero. */
4761         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4762         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4763             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4764                 val |= PCISTATE_RETRY_SAME_DMA;
4765         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4766
4767         pci_restore_state(tp->pdev);
4768
4769         /* Make sure PCI-X relaxed ordering bit is clear. */
4770         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4771         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4772         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4773
4774         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4775                 u32 val;
4776
4777                 /* Chip reset on 5780 will reset MSI enable bit,
4778                  * so it needs to be restored.
4779                  */
4780                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4781                         u16 ctrl;
4782
4783                         pci_read_config_word(tp->pdev,
4784                                              tp->msi_cap + PCI_MSI_FLAGS,
4785                                              &ctrl);
4786                         pci_write_config_word(tp->pdev,
4787                                               tp->msi_cap + PCI_MSI_FLAGS,
4788                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4789                         val = tr32(MSGINT_MODE);
4790                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4791                 }
4792
4793                 val = tr32(MEMARB_MODE);
4794                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4795
4796         } else
4797                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4798
4799         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4800                 tg3_stop_fw(tp);
4801                 tw32(0x5000, 0x400);
4802         }
4803
4804         tw32(GRC_MODE, tp->grc_mode);
4805
4806         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4807                 u32 val = tr32(0xc4);
4808
4809                 tw32(0xc4, val | (1 << 15));
4810         }
4811
4812         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4813             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4814                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4815                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4816                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4817                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4818         }
4819
4820         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4821                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4822                 tw32_f(MAC_MODE, tp->mac_mode);
4823         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4824                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4825                 tw32_f(MAC_MODE, tp->mac_mode);
4826         } else
4827                 tw32_f(MAC_MODE, 0);
4828         udelay(40);
4829
4830         /* Wait for firmware initialization to complete. */
4831         for (i = 0; i < 100000; i++) {
4832                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4833                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4834                         break;
4835                 udelay(10);
4836         }
4837
4838         /* Chip might not be fitted with firmware.  Some Sun onboard
4839          * parts are configured like that.  So don't signal the timeout
4840          * of the above loop as an error, but do report the lack of
4841          * running firmware once.
4842          */
4843         if (i >= 100000 &&
4844             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4845                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4846
4847                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4848                        tp->dev->name);
4849         }
4850
4851         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4852             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4853                 u32 val = tr32(0x7c00);
4854
4855                 tw32(0x7c00, val | (1 << 25));
4856         }
4857
4858         /* Reprobe ASF enable state.  */
4859         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4860         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4861         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4862         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4863                 u32 nic_cfg;
4864
4865                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4866                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4867                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4868                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4869                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4870                 }
4871         }
4872
4873         return 0;
4874 }
4875
4876 /* tp->lock is held. */
4877 static void tg3_stop_fw(struct tg3 *tp)
4878 {
4879         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4880                 u32 val;
4881                 int i;
4882
4883                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4884                 val = tr32(GRC_RX_CPU_EVENT);
4885                 val |= (1 << 14);
4886                 tw32(GRC_RX_CPU_EVENT, val);
4887
4888                 /* Wait for RX cpu to ACK the event.  */
4889                 for (i = 0; i < 100; i++) {
4890                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4891                                 break;
4892                         udelay(1);
4893                 }
4894         }
4895 }
4896
4897 /* tp->lock is held. */
4898 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4899 {
4900         int err;
4901
4902         tg3_stop_fw(tp);
4903
4904         tg3_write_sig_pre_reset(tp, kind);
4905
4906         tg3_abort_hw(tp, silent);
4907         err = tg3_chip_reset(tp);
4908
4909         tg3_write_sig_legacy(tp, kind);
4910         tg3_write_sig_post_reset(tp, kind);
4911
4912         if (err)
4913                 return err;
4914
4915         return 0;
4916 }
4917
4918 #define TG3_FW_RELEASE_MAJOR    0x0
4919 #define TG3_FW_RELASE_MINOR     0x0
4920 #define TG3_FW_RELEASE_FIX      0x0
4921 #define TG3_FW_START_ADDR       0x08000000
4922 #define TG3_FW_TEXT_ADDR        0x08000000
4923 #define TG3_FW_TEXT_LEN         0x9c0
4924 #define TG3_FW_RODATA_ADDR      0x080009c0
4925 #define TG3_FW_RODATA_LEN       0x60
4926 #define TG3_FW_DATA_ADDR        0x08000a40
4927 #define TG3_FW_DATA_LEN         0x20
4928 #define TG3_FW_SBSS_ADDR        0x08000a60
4929 #define TG3_FW_SBSS_LEN         0xc
4930 #define TG3_FW_BSS_ADDR         0x08000a70
4931 #define TG3_FW_BSS_LEN          0x10
4932
4933 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4934         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4935         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4936         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4937         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4938         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4939         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4940         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4941         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4942         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4943         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4944         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4945         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4946         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4947         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4948         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4949         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4950         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4951         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4952         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4953         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4954         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4955         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4956         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4957         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4958         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4959         0, 0, 0, 0, 0, 0,
4960         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4961         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4962         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4963         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4964         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4965         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4966         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4967         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4968         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4969         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4970         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4971         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4972         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4973         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4974         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4975         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4976         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4977         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4978         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4979         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4980         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4981         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4982         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4983         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4984         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4985         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4986         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4987         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4988         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4989         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4990         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4991         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4992         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4993         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4994         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4995         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4996         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4997         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4998         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4999         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5000         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5001         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5002         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5003         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5004         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5005         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5006         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5007         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5008         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5009         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5010         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5011         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5012         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5013         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5014         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5015         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5016         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5017         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5018         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5019         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5020         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5021         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5022         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5023         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5024         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5025 };
5026
5027 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5028         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5029         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5030         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5031         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5032         0x00000000
5033 };
5034
5035 #if 0 /* All zeros, don't eat up space with it. */
5036 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5037         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5038         0x00000000, 0x00000000, 0x00000000, 0x00000000
5039 };
5040 #endif
5041
5042 #define RX_CPU_SCRATCH_BASE     0x30000
5043 #define RX_CPU_SCRATCH_SIZE     0x04000
5044 #define TX_CPU_SCRATCH_BASE     0x34000
5045 #define TX_CPU_SCRATCH_SIZE     0x04000
5046
5047 /* tp->lock is held. */
5048 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5049 {
5050         int i;
5051
5052         BUG_ON(offset == TX_CPU_BASE &&
5053             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5054
5055         if (offset == RX_CPU_BASE) {
5056                 for (i = 0; i < 10000; i++) {
5057                         tw32(offset + CPU_STATE, 0xffffffff);
5058                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5059                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5060                                 break;
5061                 }
5062
5063                 tw32(offset + CPU_STATE, 0xffffffff);
5064                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5065                 udelay(10);
5066         } else {
5067                 for (i = 0; i < 10000; i++) {
5068                         tw32(offset + CPU_STATE, 0xffffffff);
5069                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5070                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5071                                 break;
5072                 }
5073         }
5074
5075         if (i >= 10000) {
5076                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5077                        "%s CPU\n",
5078                        tp->dev->name,
5079                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5080                 return -ENODEV;
5081         }
5082
5083         /* Clear firmware's nvram arbitration. */
5084         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5085                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5086         return 0;
5087 }
5088
5089 struct fw_info {
5090         unsigned int text_base;
5091         unsigned int text_len;
5092         const u32 *text_data;
5093         unsigned int rodata_base;
5094         unsigned int rodata_len;
5095         const u32 *rodata_data;
5096         unsigned int data_base;
5097         unsigned int data_len;
5098         const u32 *data_data;
5099 };
5100
5101 /* tp->lock is held. */
5102 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5103                                  int cpu_scratch_size, struct fw_info *info)
5104 {
5105         int err, lock_err, i;
5106         void (*write_op)(struct tg3 *, u32, u32);
5107
5108         if (cpu_base == TX_CPU_BASE &&
5109             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5110                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5111                        "TX cpu firmware on %s which is 5705.\n",
5112                        tp->dev->name);
5113                 return -EINVAL;
5114         }
5115
5116         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5117                 write_op = tg3_write_mem;
5118         else
5119                 write_op = tg3_write_indirect_reg32;
5120
5121         /* It is possible that bootcode is still loading at this point.
5122          * Get the nvram lock before halting the cpu.
5123          */
5124         lock_err = tg3_nvram_lock(tp);
5125         err = tg3_halt_cpu(tp, cpu_base);
5126         if (!lock_err)
5127                 tg3_nvram_unlock(tp);
5128         if (err)
5129                 goto out;
5130
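             /* Zero the whole scratch window, then copy each firmware section
              * in word by word.  The low 16 bits of a section's link address
              * (e.g. info->text_base & 0xffff) give its offset inside the
              * CPU's scratch memory.
              */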
5131         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5132                 write_op(tp, cpu_scratch_base + i, 0);
5133         tw32(cpu_base + CPU_STATE, 0xffffffff);
5134         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5135         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5136                 write_op(tp, (cpu_scratch_base +
5137                               (info->text_base & 0xffff) +
5138                               (i * sizeof(u32))),
5139                          (info->text_data ?
5140                           info->text_data[i] : 0));
5141         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5142                 write_op(tp, (cpu_scratch_base +
5143                               (info->rodata_base & 0xffff) +
5144                               (i * sizeof(u32))),
5145                          (info->rodata_data ?
5146                           info->rodata_data[i] : 0));
5147         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5148                 write_op(tp, (cpu_scratch_base +
5149                               (info->data_base & 0xffff) +
5150                               (i * sizeof(u32))),
5151                          (info->data_data ?
5152                           info->data_data[i] : 0));
5153
5154         err = 0;
5155
5156 out:
5157         return err;
5158 }
5159
5160 /* tp->lock is held. */
5161 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5162 {
5163         struct fw_info info;
5164         int err, i;
5165
5166         info.text_base = TG3_FW_TEXT_ADDR;
5167         info.text_len = TG3_FW_TEXT_LEN;
5168         info.text_data = &tg3FwText[0];
5169         info.rodata_base = TG3_FW_RODATA_ADDR;
5170         info.rodata_len = TG3_FW_RODATA_LEN;
5171         info.rodata_data = &tg3FwRodata[0];
5172         info.data_base = TG3_FW_DATA_ADDR;
5173         info.data_len = TG3_FW_DATA_LEN;
5174         info.data_data = NULL;
5175
5176         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5177                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5178                                     &info);
5179         if (err)
5180                 return err;
5181
5182         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5183                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5184                                     &info);
5185         if (err)
5186                 return err;
5187
5188         /* Now start up only the RX cpu. */
5189         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5190         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5191
5192         for (i = 0; i < 5; i++) {
5193                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5194                         break;
5195                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5196                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5197                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5198                 udelay(1000);
5199         }
5200         if (i >= 5) {
5201                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5202                        "to set RX CPU PC: is %08x, should be %08x\n",
5203                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5204                        TG3_FW_TEXT_ADDR);
5205                 return -ENODEV;
5206         }
5207         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5208         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5209
5210         return 0;
5211 }
5212
5213 #if TG3_TSO_SUPPORT != 0
5214
5215 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5216 #define TG3_TSO_FW_RELASE_MINOR         0x6
5217 #define TG3_TSO_FW_RELEASE_FIX          0x0
5218 #define TG3_TSO_FW_START_ADDR           0x08000000
5219 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5220 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5221 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5222 #define TG3_TSO_FW_RODATA_LEN           0x60
5223 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5224 #define TG3_TSO_FW_DATA_LEN             0x30
5225 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5226 #define TG3_TSO_FW_SBSS_LEN             0x2c
5227 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5228 #define TG3_TSO_FW_BSS_LEN              0x894
5229
5230 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5231         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5232         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5233         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5234         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5235         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5236         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5237         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5238         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5239         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5240         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5241         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5242         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5243         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5244         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5245         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5246         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5247         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5248         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5249         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5250         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5251         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5252         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5253         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5254         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5255         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5256         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5257         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5258         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5259         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5260         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5261         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5262         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5263         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5264         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5265         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5266         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5267         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5268         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5269         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5270         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5271         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5272         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5273         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5274         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5275         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5276         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5277         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5278         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5279         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5280         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5281         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5282         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5283         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5284         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5285         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5286         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5287         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5288         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5289         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5290         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5291         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5292         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5293         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5294         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5295         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5296         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5297         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5298         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5299         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5300         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5301         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5302         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5303         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5304         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5305         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5306         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5307         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5308         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5309         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5310         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5311         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5312         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5313         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5314         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5315         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5316         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5317         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5318         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5319         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5320         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5321         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5322         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5323         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5324         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5325         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5326         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5327         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5328         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5329         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5330         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5331         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5332         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5333         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5334         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5335         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5336         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5337         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5338         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5339         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5340         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5341         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5342         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5343         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5344         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5345         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5346         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5347         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5348         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5349         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5350         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5351         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5352         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5353         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5354         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5355         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5356         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5357         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5358         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5359         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5360         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5361         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5362         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5363         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5364         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5365         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5366         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5367         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5368         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5369         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5370         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5371         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5372         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5373         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5374         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5375         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5376         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5377         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5378         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5379         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5380         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5381         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5382         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5383         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5384         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5385         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5386         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5387         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5388         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5389         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5390         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5391         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5392         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5393         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5394         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5395         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5396         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5397         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5398         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5399         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5400         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5401         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5402         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5403         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5404         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5405         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5406         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5407         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5408         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5409         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5410         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5411         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5412         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5413         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5414         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5415         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5416         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5417         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5418         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5419         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5420         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5421         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5422         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5423         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5424         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5425         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5426         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5427         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5428         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5429         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5430         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5431         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5432         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5433         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5434         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5435         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5436         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5437         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5438         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5439         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5440         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5441         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5442         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5443         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5444         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5445         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5446         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5447         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5448         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5449         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5450         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5451         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5452         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5453         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5454         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5455         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5456         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5457         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5458         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5459         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5460         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5461         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5462         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5463         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5464         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5465         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5466         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5467         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5468         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5469         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5470         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5471         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5472         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5473         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5474         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5475         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5476         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5477         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5478         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5479         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5480         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5481         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5482         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5483         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5484         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5485         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5486         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5487         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5488         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5489         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5490         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5491         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5492         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5493         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5494         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5495         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5496         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5497         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5498         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5499         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5500         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5501         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5502         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5503         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5504         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5505         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5506         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5507         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5508         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5509         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5510         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5511         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5512         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5513         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5514         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5515 };
5516
5517 static const u32 tg3TsoFwRodata[] = {
5518         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5519         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5520         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5521         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5522         0x00000000,
5523 };
5524
5525 static const u32 tg3TsoFwData[] = {
5526         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5527         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5528         0x00000000,
5529 };
5530
5531 /* 5705 needs a special version of the TSO firmware.  */
5532 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5533 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5534 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5535 #define TG3_TSO5_FW_START_ADDR          0x00010000
5536 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5537 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5538 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5539 #define TG3_TSO5_FW_RODATA_LEN          0x50
5540 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5541 #define TG3_TSO5_FW_DATA_LEN            0x20
5542 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5543 #define TG3_TSO5_FW_SBSS_LEN            0x28
5544 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5545 #define TG3_TSO5_FW_BSS_LEN             0x88
5546
5547 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5548         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5549         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5550         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5551         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5552         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5553         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5554         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5555         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5556         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5557         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5558         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5559         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5560         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5561         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5562         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5563         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5564         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5565         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5566         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5567         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5568         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5569         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5570         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5571         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5572         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5573         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5574         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5575         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5576         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5577         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5578         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5579         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5580         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5581         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5582         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5583         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5584         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5585         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5586         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5587         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5588         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5589         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5590         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5591         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5592         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5593         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5594         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5595         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5596         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5597         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5598         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5599         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5600         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5601         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5602         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5603         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5604         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5605         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5606         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5607         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5608         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5609         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5610         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5611         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5612         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5613         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5614         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5615         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5616         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5617         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5618         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5619         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5620         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5621         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5622         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5623         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5624         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5625         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5626         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5627         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5628         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5629         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5630         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5631         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5632         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5633         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5634         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5635         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5636         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5637         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5638         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5639         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5640         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5641         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5642         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5643         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5644         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5645         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5646         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5647         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5648         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5649         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5650         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5651         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5652         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5653         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5654         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5655         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5656         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5657         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5658         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5659         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5660         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5661         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5662         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5663         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5664         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5665         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5666         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5667         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5668         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5669         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5670         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5671         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5672         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5673         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5674         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5675         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5676         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5677         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5678         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5679         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5680         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5681         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5682         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5683         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5684         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5685         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5686         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5687         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5688         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5689         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5690         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5691         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5692         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5693         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5694         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5695         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5696         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5697         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5698         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5699         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5700         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5701         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5702         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5703         0x00000000, 0x00000000, 0x00000000,
5704 };
5705
5706 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5707         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5708         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5709         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5710         0x00000000, 0x00000000, 0x00000000,
5711 };
5712
5713 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5714         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5715         0x00000000, 0x00000000, 0x00000000,
5716 };
5717
5718 /* tp->lock is held. */
5719 static int tg3_load_tso_firmware(struct tg3 *tp)
5720 {
5721         struct fw_info info;
5722         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5723         int err, i;
5724
5725         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5726                 return 0;
5727
5728         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5729                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5730                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5731                 info.text_data = &tg3Tso5FwText[0];
5732                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5733                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5734                 info.rodata_data = &tg3Tso5FwRodata[0];
5735                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5736                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5737                 info.data_data = &tg3Tso5FwData[0];
5738                 cpu_base = RX_CPU_BASE;
5739                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5740                 cpu_scratch_size = (info.text_len +
5741                                     info.rodata_len +
5742                                     info.data_len +
5743                                     TG3_TSO5_FW_SBSS_LEN +
5744                                     TG3_TSO5_FW_BSS_LEN);
5745         } else {
5746                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5747                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5748                 info.text_data = &tg3TsoFwText[0];
5749                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5750                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5751                 info.rodata_data = &tg3TsoFwRodata[0];
5752                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5753                 info.data_len = TG3_TSO_FW_DATA_LEN;
5754                 info.data_data = &tg3TsoFwData[0];
5755                 cpu_base = TX_CPU_BASE;
5756                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5757                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5758         }
5759
5760         err = tg3_load_firmware_cpu(tp, cpu_base,
5761                                     cpu_scratch_base, cpu_scratch_size,
5762                                     &info);
5763         if (err)
5764                 return err;
5765
5766         /* Now startup the cpu. */
5767         tw32(cpu_base + CPU_STATE, 0xffffffff);
5768         tw32_f(cpu_base + CPU_PC,    info.text_base);
5769
5770         for (i = 0; i < 5; i++) {
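             /* Give the CPU up to five attempts to latch the new program
              * counter, halting it and re-writing the PC between tries.
              */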
5771                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5772                         break;
5773                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5774                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5775                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5776                 udelay(1000);
5777         }
5778         if (i >= 5) {
5779                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5780                        "CPU PC for %s: is %08x, should be %08x\n",
5781                        tp->dev->name, tr32(cpu_base + CPU_PC),
5782                        info.text_base);
5783                 return -ENODEV;
5784         }
5785         tw32(cpu_base + CPU_STATE, 0xffffffff);
5786         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5787         return 0;
5788 }
5789
5790 #endif /* TG3_TSO_SUPPORT != 0 */
5791
5792 /* tp->lock is held. */
5793 static void __tg3_set_mac_addr(struct tg3 *tp)
5794 {
5795         u32 addr_high, addr_low;
5796         int i;
5797
5798         addr_high = ((tp->dev->dev_addr[0] << 8) |
5799                      tp->dev->dev_addr[1]);
5800         addr_low = ((tp->dev->dev_addr[2] << 24) |
5801                     (tp->dev->dev_addr[3] << 16) |
5802                     (tp->dev->dev_addr[4] <<  8) |
5803                     (tp->dev->dev_addr[5] <<  0));
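             /* Write the same station address into all four MAC address
              * register pairs.
              */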
5804         for (i = 0; i < 4; i++) {
5805                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5806                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5807         }
5808
5809         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5810             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5811                 for (i = 0; i < 12; i++) {
5812                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5813                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5814                 }
5815         }
5816
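             /* Use the byte sum of the address, masked to the seed field,
              * as the transmit backoff seed.
              */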
5817         addr_high = (tp->dev->dev_addr[0] +
5818                      tp->dev->dev_addr[1] +
5819                      tp->dev->dev_addr[2] +
5820                      tp->dev->dev_addr[3] +
5821                      tp->dev->dev_addr[4] +
5822                      tp->dev->dev_addr[5]) &
5823                 TX_BACKOFF_SEED_MASK;
5824         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5825 }
5826
5827 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5828 {
5829         struct tg3 *tp = netdev_priv(dev);
5830         struct sockaddr *addr = p;
5831         int err = 0;
5832
5833         if (!is_valid_ether_addr(addr->sa_data))
5834                 return -EINVAL;
5835
5836         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5837
5838         if (!netif_running(dev))
5839                 return 0;
5840
5841         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5842                 /* Reset chip so that ASF can re-init any MAC addresses it
5843                  * needs.
5844                  */
5845                 tg3_netif_stop(tp);
5846                 tg3_full_lock(tp, 1);
5847
5848                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5849                 err = tg3_restart_hw(tp, 0);
5850                 if (!err)
5851                         tg3_netif_start(tp);
5852                 tg3_full_unlock(tp);
5853         } else {
5854                 spin_lock_bh(&tp->lock);
5855                 __tg3_set_mac_addr(tp);
5856                 spin_unlock_bh(&tp->lock);
5857         }
5858
5859         return err;
5860 }
5861
5862 /* tp->lock is held. */
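     /* Program one TG3_BDINFO block in NIC SRAM: the 64-bit host ring
      * address (high/low halves), the maxlen/flags word and, on chips
      * that are not 5705-plus, the NIC-side ring address.
      */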
5863 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5864                            dma_addr_t mapping, u32 maxlen_flags,
5865                            u32 nic_addr)
5866 {
5867         tg3_write_mem(tp,
5868                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5869                       ((u64) mapping >> 32));
5870         tg3_write_mem(tp,
5871                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5872                       ((u64) mapping & 0xffffffff));
5873         tg3_write_mem(tp,
5874                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5875                        maxlen_flags);
5876
5877         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5878                 tg3_write_mem(tp,
5879                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5880                               nic_addr);
5881 }
5882
5883 static void __tg3_set_rx_mode(struct net_device *);
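     /* Load the host coalescing engine with the ethtool_coalesce parameters.
      * The interrupt-time tick and statistics-block controls only exist on
      * chips that are not 5705-plus.
      */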
5884 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5885 {
5886         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5887         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5888         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5889         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5890         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5891                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5892                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5893         }
5894         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5895         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5896         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5897                 u32 val = ec->stats_block_coalesce_usecs;
5898
5899                 if (!netif_carrier_ok(tp->dev))
5900                         val = 0;
5901
5902                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5903         }
5904 }
5905
5906 /* tp->lock is held. */
5907 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5908 {
5909         u32 val, rdmac_mode;
5910         int i, err, limit;
5911
5912         tg3_disable_ints(tp);
5913
5914         tg3_stop_fw(tp);
5915
5916         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5917
5918         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5919                 tg3_abort_hw(tp, 1);
5920         }
5921
5922         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5923                 tg3_phy_reset(tp);
5924
5925         err = tg3_chip_reset(tp);
5926         if (err)
5927                 return err;
5928
5929         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5930
5931         /* This works around an issue with Athlon chipsets on
5932          * B3 tigon3 silicon.  This bit has no effect on any
5933          * other revision.  But do not set this on PCI Express
5934          * chips.
5935          */
5936         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5937                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5938         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5939
5940         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5941             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5942                 val = tr32(TG3PCI_PCISTATE);
5943                 val |= PCISTATE_RETRY_SAME_DMA;
5944                 tw32(TG3PCI_PCISTATE, val);
5945         }
5946
5947         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5948                 /* Enable some hw fixes.  */
5949                 val = tr32(TG3PCI_MSI_DATA);
5950                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5951                 tw32(TG3PCI_MSI_DATA, val);
5952         }
5953
5954         /* Descriptor ring init may access the NIC SRAM
5955          * area to set up the TX descriptors, so we
5956          * can only do this after the hardware has been
5957          * successfully reset.
5958          */
5959         err = tg3_init_rings(tp);
5960         if (err)
5961                 return err;
5962
5963         /* This value is determined during the probe time DMA
5964          * engine test, tg3_test_dma.
5965          */
5966         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5967
5968         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5969                           GRC_MODE_4X_NIC_SEND_RINGS |
5970                           GRC_MODE_NO_TX_PHDR_CSUM |
5971                           GRC_MODE_NO_RX_PHDR_CSUM);
5972         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5973
5974         /* Pseudo-header checksum is done by hardware logic and not
5975          * the offload processors, so make the chip do the pseudo-
5976          * header checksums on receive.  For transmit it is more
5977          * convenient to do the pseudo-header checksum in software
5978          * as Linux does that on transmit for us in all cases.
5979          */
5980         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5981
5982         tw32(GRC_MODE,
5983              tp->grc_mode |
5984              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5985
5986         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
5987         val = tr32(GRC_MISC_CFG);
5988         val &= ~0xff;
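             /* Assuming the prescaler divides by N + 1, a value of 65 turns
              * the 66 MHz clock into a 1 MHz (1 us) timer tick.
              */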
5989         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5990         tw32(GRC_MISC_CFG, val);
5991
5992         /* Initialize MBUF/DESC pool. */
5993         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5994                 /* Do nothing.  */
5995         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5996                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5997                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5998                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5999                 else
6000                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6001                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6002                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6003         }
6004 #if TG3_TSO_SUPPORT != 0
6005         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6006                 int fw_len;
6007
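                     /* The 5705 TSO firmware is loaded at the start of the
                      * MBUF pool (see tg3_load_tso_firmware), so carve its
                      * footprint, rounded up to a 128-byte boundary, out of
                      * the pool.
                      */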
6008                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6009                           TG3_TSO5_FW_RODATA_LEN +
6010                           TG3_TSO5_FW_DATA_LEN +
6011                           TG3_TSO5_FW_SBSS_LEN +
6012                           TG3_TSO5_FW_BSS_LEN);
6013                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6014                 tw32(BUFMGR_MB_POOL_ADDR,
6015                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6016                 tw32(BUFMGR_MB_POOL_SIZE,
6017                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6018         }
6019 #endif
6020
6021         if (tp->dev->mtu <= ETH_DATA_LEN) {
6022                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6023                      tp->bufmgr_config.mbuf_read_dma_low_water);
6024                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6025                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6026                 tw32(BUFMGR_MB_HIGH_WATER,
6027                      tp->bufmgr_config.mbuf_high_water);
6028         } else {
6029                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6030                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6031                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6032                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6033                 tw32(BUFMGR_MB_HIGH_WATER,
6034                      tp->bufmgr_config.mbuf_high_water_jumbo);
6035         }
6036         tw32(BUFMGR_DMA_LOW_WATER,
6037              tp->bufmgr_config.dma_low_water);
6038         tw32(BUFMGR_DMA_HIGH_WATER,
6039              tp->bufmgr_config.dma_high_water);
6040
6041         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
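             /* Poll for up to 20 ms (2000 x 10 us) for the buffer manager
              * to report itself enabled.
              */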
6042         for (i = 0; i < 2000; i++) {
6043                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6044                         break;
6045                 udelay(10);
6046         }
6047         if (i >= 2000) {
6048                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6049                        tp->dev->name);
6050                 return -ENODEV;
6051         }
6052
6053         /* Setup replenish threshold. */
6054         val = tp->rx_pending / 8;
6055         if (val == 0)
6056                 val = 1;
6057         else if (val > tp->rx_std_max_post)
6058                 val = tp->rx_std_max_post;
6059
6060         tw32(RCVBDI_STD_THRESH, val);
6061
6062         /* Initialize TG3_BDINFO's at:
6063          *  RCVDBDI_STD_BD:     standard eth size rx ring
6064          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6065          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6066          *
6067          * like so:
6068          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6069          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6070          *                              ring attribute flags
6071          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6072          *
6073          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6074          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6075          *
6076          * The size of each ring is fixed in the firmware, but the location is
6077          * configurable.
6078          */
6079         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6080              ((u64) tp->rx_std_mapping >> 32));
6081         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6082              ((u64) tp->rx_std_mapping & 0xffffffff));
6083         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6084              NIC_SRAM_RX_BUFFER_DESC);
6085
6086         /* Don't even try to program the JUMBO/MINI buffer descriptor
6087          * configs on 5705.
6088          */
6089         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6090                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6091                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6092         } else {
6093                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6094                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6095
6096                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6097                      BDINFO_FLAGS_DISABLED);
6098
6099                 /* Setup replenish threshold. */
6100                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6101
6102                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6103                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6104                              ((u64) tp->rx_jumbo_mapping >> 32));
6105                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6106                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6107                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6108                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6109                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6110                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6111                 } else {
6112                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6113                              BDINFO_FLAGS_DISABLED);
6114                 }
6115
6116         }
6117
6118         /* There is only one send ring on 5705/5750, no need to explicitly
6119          * disable the others.
6120          */
6121         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6122                 /* Clear out send RCB ring in SRAM. */
6123                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6124                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6125                                       BDINFO_FLAGS_DISABLED);
6126         }
6127
6128         tp->tx_prod = 0;
6129         tp->tx_cons = 0;
6130         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6131         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6132
6133         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6134                        tp->tx_desc_mapping,
6135                        (TG3_TX_RING_SIZE <<
6136                         BDINFO_FLAGS_MAXLEN_SHIFT),
6137                        NIC_SRAM_TX_BUFFER_DESC);
6138
6139         /* There is only one receive return ring on 5705/5750, no need
6140          * to explicitly disable the others.
6141          */
6142         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6143                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6144                      i += TG3_BDINFO_SIZE) {
6145                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6146                                       BDINFO_FLAGS_DISABLED);
6147                 }
6148         }
6149
6150         tp->rx_rcb_ptr = 0;
6151         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6152
6153         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6154                        tp->rx_rcb_mapping,
6155                        (TG3_RX_RCB_RING_SIZE(tp) <<
6156                         BDINFO_FLAGS_MAXLEN_SHIFT),
6157                        0);
6158
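             /* Publish the initial producer indices so the NIC sees the
              * standard (and, if enabled, jumbo) receive buffers posted by
              * tg3_init_rings.
              */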
6159         tp->rx_std_ptr = tp->rx_pending;
6160         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6161                      tp->rx_std_ptr);
6162
6163         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6164                                                 tp->rx_jumbo_pending : 0;
6165         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6166                      tp->rx_jumbo_ptr);
6167
6168         /* Initialize MAC address and backoff seed. */
6169         __tg3_set_mac_addr(tp);
6170
6171         /* MTU + ethernet header + FCS + optional VLAN tag */
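             /* (e.g. 1500 + 14 + 4 + 4 = 1522 bytes for a standard frame) */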
6172         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6173
6174         /* The slot time is changed by tg3_setup_phy if we
6175          * run at gigabit with half duplex.
6176          */
6177         tw32(MAC_TX_LENGTHS,
6178              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6179              (6 << TX_LENGTHS_IPG_SHIFT) |
6180              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6181
6182         /* Receive rules. */
6183         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6184         tw32(RCVLPC_CONFIG, 0x0181);
6185
6186         /* Calculate RDMAC_MODE setting early, since we need it to
6187          * determine the RCVLPC_STATS_ENABLE mask used below.
6188          */
6189         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6190                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6191                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6192                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6193                       RDMAC_MODE_LNGREAD_ENAB);
6194         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6195                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6196
6197         /* If statement applies to 5705 and 5750 PCI devices only */
6198         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6199              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6200             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6201                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6202                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6203                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6204                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6205                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6206                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6207                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6208                 }
6209         }
6210
6211         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6212                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6213
6214 #if TG3_TSO_SUPPORT != 0
6215         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6216                 rdmac_mode |= (1 << 27);
6217 #endif
6218
6219         /* Receive/send statistics. */
6220         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6221                 val = tr32(RCVLPC_STATS_ENABLE);
6222                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6223                 tw32(RCVLPC_STATS_ENABLE, val);
6224         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6225                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6226                 val = tr32(RCVLPC_STATS_ENABLE);
6227                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6228                 tw32(RCVLPC_STATS_ENABLE, val);
6229         } else {
6230                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6231         }
6232         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6233         tw32(SNDDATAI_STATSENAB, 0xffffff);
6234         tw32(SNDDATAI_STATSCTRL,
6235              (SNDDATAI_SCTRL_ENABLE |
6236               SNDDATAI_SCTRL_FASTUPD));
6237
6238         /* Setup host coalescing engine. */
6239         tw32(HOSTCC_MODE, 0);
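             /* Wait up to 20 ms for the engine to go idle before it is
              * reprogrammed below.
              */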
6240         for (i = 0; i < 2000; i++) {
6241                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6242                         break;
6243                 udelay(10);
6244         }
6245
6246         __tg3_set_coalesce(tp, &tp->coal);
6247
6248         /* set status block DMA address */
6249         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6250              ((u64) tp->status_mapping >> 32));
6251         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6252              ((u64) tp->status_mapping & 0xffffffff));
6253
6254         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6255                 /* Status/statistics block address.  See tg3_timer,
6256                  * the tg3_periodic_fetch_stats call there, and
6257                  * tg3_get_stats to see how this works for 5705/5750 chips.
6258                  */
6259                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6260                      ((u64) tp->stats_mapping >> 32));
6261                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6262                      ((u64) tp->stats_mapping & 0xffffffff));
6263                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6264                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6265         }
6266
6267         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6268
6269         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6270         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6271         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6272                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6273
6274         /* Clear statistics/status block in chip, and status block in ram. */
6275         for (i = NIC_SRAM_STATS_BLK;
6276              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6277              i += sizeof(u32)) {
6278                 tg3_write_mem(tp, i, 0);
6279                 udelay(40);
6280         }
6281         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6282
6283         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6284                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6285                 /* reset to prevent losing 1st rx packet intermittently */
6286                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6287                 udelay(10);
6288         }
6289
6290         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6291                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6292         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6293         udelay(40);
6294
6295         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6296          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6297          * register to preserve the GPIO settings for LOMs. The GPIOs,
6298          * whether used as inputs or outputs, are set by boot code after
6299          * reset.
6300          */
6301         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6302                 u32 gpio_mask;
6303
6304                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6305                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6306
6307                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6308                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6309                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6310
6311                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6312                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6313
6314                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6315
6316                 /* GPIO1 must be driven high for eeprom write protect */
6317                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6318                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6319         }
6320         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6321         udelay(100);
6322
6323         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6324         tp->last_tag = 0;
6325
6326         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6327                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6328                 udelay(40);
6329         }
6330
6331         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6332                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6333                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6334                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6335                WDMAC_MODE_LNGREAD_ENAB);
6336
6337         /* This if statement applies to 5705 and 5750 PCI devices only */
6338         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6339              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6340             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6341                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6342                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6343                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6344                         /* nothing */
6345                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6346                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6347                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6348                         val |= WDMAC_MODE_RX_ACCEL;
6349                 }
6350         }
6351
6352         /* Enable host coalescing bug fix */
6353         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6354             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6355                 val |= (1 << 29);
6356
6357         tw32_f(WDMAC_MODE, val);
6358         udelay(40);
6359
6360         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6361                 val = tr32(TG3PCI_X_CAPS);
6362                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6363                         val &= ~PCIX_CAPS_BURST_MASK;
6364                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6365                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6366                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6367                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6368                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6369                                 val |= (tp->split_mode_max_reqs <<
6370                                         PCIX_CAPS_SPLIT_SHIFT);
6371                 }
6372                 tw32(TG3PCI_X_CAPS, val);
6373         }
6374
6375         tw32_f(RDMAC_MODE, rdmac_mode);
6376         udelay(40);
6377
6378         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6379         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6380                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6381         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6382         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6383         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6384         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6385         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6386 #if TG3_TSO_SUPPORT != 0
6387         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6388                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6389 #endif
6390         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6391         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6392
6393         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6394                 err = tg3_load_5701_a0_firmware_fix(tp);
6395                 if (err)
6396                         return err;
6397         }
6398
6399 #if TG3_TSO_SUPPORT != 0
6400         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6401                 err = tg3_load_tso_firmware(tp);
6402                 if (err)
6403                         return err;
6404         }
6405 #endif
6406
6407         tp->tx_mode = TX_MODE_ENABLE;
6408         tw32_f(MAC_TX_MODE, tp->tx_mode);
6409         udelay(100);
6410
6411         tp->rx_mode = RX_MODE_ENABLE;
6412         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6413                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6414
6415         tw32_f(MAC_RX_MODE, tp->rx_mode);
6416         udelay(10);
6417
6418         if (tp->link_config.phy_is_low_power) {
6419                 tp->link_config.phy_is_low_power = 0;
6420                 tp->link_config.speed = tp->link_config.orig_speed;
6421                 tp->link_config.duplex = tp->link_config.orig_duplex;
6422                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6423         }
6424
6425         tp->mi_mode = MAC_MI_MODE_BASE;
6426         tw32_f(MAC_MI_MODE, tp->mi_mode);
6427         udelay(80);
6428
6429         tw32(MAC_LED_CTRL, tp->led_ctrl);
6430
6431         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6432         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6433                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6434                 udelay(10);
6435         }
6436         tw32_f(MAC_RX_MODE, tp->rx_mode);
6437         udelay(10);
6438
6439         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6440                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6441                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6442                         /* Set drive transmission level to 1.2V only if
6443                          * the signal pre-emphasis bit is not set.  */
6444                         val = tr32(MAC_SERDES_CFG);
6445                         val &= 0xfffff000;
6446                         val |= 0x880;
6447                         tw32(MAC_SERDES_CFG, val);
6448                 }
6449                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6450                         tw32(MAC_SERDES_CFG, 0x616000);
6451         }
6452
6453         /* Prevent chip from dropping frames when flow control
6454          * is enabled.
6455          */
6456         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6457
6458         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6459             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6460                 /* Use hardware link auto-negotiation */
6461                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6462         }
6463
6464         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6465             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6466                 u32 tmp;
6467
6468                 tmp = tr32(SERDES_RX_CTRL);
6469                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6470                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6471                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6472                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6473         }
6474
6475         err = tg3_setup_phy(tp, reset_phy);
6476         if (err)
6477                 return err;
6478
6479         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6480                 u32 tmp;
6481
6482                 /* Clear CRC stats. */
6483                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6484                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6485                         tg3_readphy(tp, 0x14, &tmp);
6486                 }
6487         }
6488
6489         __tg3_set_rx_mode(tp->dev);
6490
6491         /* Initialize receive rules. */
6492         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6493         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6494         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6495         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6496
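             /* The driver manages 16 receive rules on older chips and only 8
              * on 5705-class and newer (non-5780-class) parts.  Rules 0 and 1
              * were programmed above; clear the remaining ones.  When ASF is
              * enabled the top four rules are left untouched, presumably
              * reserved for the firmware.
              */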
6497         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6498             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6499                 limit = 8;
6500         else
6501                 limit = 16;
6502         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6503                 limit -= 4;
6504         switch (limit) {
6505         case 16:
6506                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6507         case 15:
6508                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6509         case 14:
6510                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6511         case 13:
6512                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6513         case 12:
6514                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6515         case 11:
6516                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6517         case 10:
6518                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6519         case 9:
6520                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6521         case 8:
6522                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6523         case 7:
6524                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6525         case 6:
6526                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6527         case 5:
6528                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6529         case 4:
6530                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6531         case 3:
6532                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6533         case 2:
6534         case 1:
6535
6536         default:
6537                 break;
6538         }
6539
6540         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6541
6542         return 0;
6543 }
6544
6545 /* Called at device open time to get the chip ready for
6546  * packet processing.  Invoked with tp->lock held.
6547  */
6548 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6549 {
6550         int err;
6551
6552         /* Force the chip into D0. */
6553         err = tg3_set_power_state(tp, PCI_D0);
6554         if (err)
6555                 goto out;
6556
6557         tg3_switch_clocks(tp);
6558
6559         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6560
6561         err = tg3_reset_hw(tp, reset_phy);
6562
6563 out:
6564         return err;
6565 }
6566
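     /* Accumulate a 32-bit hardware counter into a 64-bit software counter
      * kept as a high/low pair.  If the low word wrapped during the add it
      * ends up smaller than the value just added, so carry into the high
      * word (e.g. 0xffffff00 + 0x200 wraps to 0x100, and 0x100 < 0x200).
      */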
6567 #define TG3_STAT_ADD32(PSTAT, REG) \
6568 do {    u32 __val = tr32(REG); \
6569         (PSTAT)->low += __val; \
6570         if ((PSTAT)->low < __val) \
6571                 (PSTAT)->high += 1; \
6572 } while (0)
6573
6574 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6575 {
6576         struct tg3_hw_stats *sp = tp->hw_stats;
6577
6578         if (!netif_carrier_ok(tp->dev))
6579                 return;
6580
6581         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6582         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6583         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6584         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6585         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6586         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6587         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6588         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6589         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6590         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6591         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6592         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6593         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6594
6595         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6596         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6597         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6598         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6599         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6600         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6601         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6602         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6603         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6604         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6605         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6606         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6607         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6608         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6609
6610         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6611         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6612         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6613 }
6614
6615 static void tg3_timer(unsigned long __opaque)
6616 {
6617         struct tg3 *tp = (struct tg3 *) __opaque;
6618
6619         if (tp->irq_sync)
6620                 goto restart_timer;
6621
6622         spin_lock(&tp->lock);
6623
6624         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6625                 /* All of this garbage is because, when using non-tagged
6626                  * IRQ status, the mailbox/status_block protocol the chip
6627                  * uses with the CPU is race prone.
6628                  */
6629                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6630                         tw32(GRC_LOCAL_CTRL,
6631                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6632                 } else {
6633                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6634                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6635                 }
6636
6637                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6638                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6639                         spin_unlock(&tp->lock);
6640                         schedule_work(&tp->reset_task);
6641                         return;
6642                 }
6643         }
6644
6645         /* This part only runs once per second. */
6646         if (!--tp->timer_counter) {
6647                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6648                         tg3_periodic_fetch_stats(tp);
6649
6650                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6651                         u32 mac_stat;
6652                         int phy_event;
6653
6654                         mac_stat = tr32(MAC_STATUS);
6655
6656                         phy_event = 0;
6657                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6658                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6659                                         phy_event = 1;
6660                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6661                                 phy_event = 1;
6662
6663                         if (phy_event)
6664                                 tg3_setup_phy(tp, 0);
6665                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6666                         u32 mac_stat = tr32(MAC_STATUS);
6667                         int need_setup = 0;
6668
6669                         if (netif_carrier_ok(tp->dev) &&
6670                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6671                                 need_setup = 1;
6672                         }
6673                         if (!netif_carrier_ok(tp->dev) &&
6674                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6675                                          MAC_STATUS_SIGNAL_DET))) {
6676                                 need_setup = 1;
6677                         }
6678                         if (need_setup) {
6679                                 if (!tp->serdes_counter) {
6680                                         tw32_f(MAC_MODE,
6681                                              (tp->mac_mode &
6682                                               ~MAC_MODE_PORT_MODE_MASK));
6683                                         udelay(40);
6684                                         tw32_f(MAC_MODE, tp->mac_mode);
6685                                         udelay(40);
6686                                 }
6687                                 tg3_setup_phy(tp, 0);
6688                         }
6689                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6690                         tg3_serdes_parallel_detect(tp);
6691
6692                 tp->timer_counter = tp->timer_multiplier;
6693         }
6694
6695         /* Heartbeat is only sent once every 2 seconds.
6696          *
6697          * The heartbeat is to tell the ASF firmware that the host
6698          * driver is still alive.  In the event that the OS crashes,
6699          * ASF needs to reset the hardware to free up the FIFO space
6700          * that may be filled with rx packets destined for the host.
6701          * If the FIFO is full, ASF will no longer function properly.
6702          *
6703          * Unintended resets have been reported on real time kernels
6704          * where the timer doesn't run on time.  Netpoll will also have
6705          * the same problem.
6706          *
6707          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
6708          * to check the ring condition when the heartbeat is expiring
6709          * before doing the reset.  This will prevent most unintended
6710          * resets.
6711          */
6712         if (!--tp->asf_counter) {
6713                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6714                         u32 val;
6715
6716                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6717                                       FWCMD_NICDRV_ALIVE3);
6718                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6719                         /* 5 seconds timeout */
6720                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6721                         val = tr32(GRC_RX_CPU_EVENT);
6722                         val |= (1 << 14);
6723                         tw32(GRC_RX_CPU_EVENT, val);
6724                 }
6725                 tp->asf_counter = tp->asf_multiplier;
6726         }
6727
6728         spin_unlock(&tp->lock);
6729
6730 restart_timer:
6731         tp->timer.expires = jiffies + tp->timer_offset;
6732         add_timer(&tp->timer);
6733 }
6734
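     /* Select the ISR matching the interrupt scheme in use: tg3_msi or
      * tg3_msi_1shot when MSI is enabled (the vector is not shared),
      * otherwise tg3_interrupt or tg3_interrupt_tagged on a shared INTx
      * line.
      */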
6735 static int tg3_request_irq(struct tg3 *tp)
6736 {
6737         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6738         unsigned long flags;
6739         struct net_device *dev = tp->dev;
6740
6741         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6742                 fn = tg3_msi;
6743                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6744                         fn = tg3_msi_1shot;
6745                 flags = IRQF_SAMPLE_RANDOM;
6746         } else {
6747                 fn = tg3_interrupt;
6748                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6749                         fn = tg3_interrupt_tagged;
6750                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6751         }
6752         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6753 }
6754
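     /* Verify that the chip can raise an interrupt at all: temporarily
      * install tg3_test_isr, force an immediate host-coalescing interrupt
      * with HOSTCC_MODE_NOW, and poll the interrupt mailbox (up to ~50ms)
      * for a non-zero value before restoring the normal handler.
      */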
6755 static int tg3_test_interrupt(struct tg3 *tp)
6756 {
6757         struct net_device *dev = tp->dev;
6758         int err, i;
6759         u32 int_mbox = 0;
6760
6761         if (!netif_running(dev))
6762                 return -ENODEV;
6763
6764         tg3_disable_ints(tp);
6765
6766         free_irq(tp->pdev->irq, dev);
6767
6768         err = request_irq(tp->pdev->irq, tg3_test_isr,
6769                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6770         if (err)
6771                 return err;
6772
6773         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6774         tg3_enable_ints(tp);
6775
6776         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6777                HOSTCC_MODE_NOW);
6778
6779         for (i = 0; i < 5; i++) {
6780                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6781                                         TG3_64BIT_REG_LOW);
6782                 if (int_mbox != 0)
6783                         break;
6784                 msleep(10);
6785         }
6786
6787         tg3_disable_ints(tp);
6788
6789         free_irq(tp->pdev->irq, dev);
6790
6791         err = tg3_request_irq(tp);
6792
6793         if (err)
6794                 return err;
6795
6796         if (int_mbox != 0)
6797                 return 0;
6798
6799         return -EIO;
6800 }
6801
6802 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6803  * INTx mode is successfully restored.
6804  */
6805 static int tg3_test_msi(struct tg3 *tp)
6806 {
6807         struct net_device *dev = tp->dev;
6808         int err;
6809         u16 pci_cmd;
6810
6811         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6812                 return 0;
6813
6814         /* Turn off SERR reporting in case MSI terminates with Master
6815          * Abort.
6816          */
6817         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6818         pci_write_config_word(tp->pdev, PCI_COMMAND,
6819                               pci_cmd & ~PCI_COMMAND_SERR);
6820
6821         err = tg3_test_interrupt(tp);
6822
6823         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6824
6825         if (!err)
6826                 return 0;
6827
6828         /* other failures */
6829         if (err != -EIO)
6830                 return err;
6831
6832         /* MSI test failed, go back to INTx mode */
6833         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6834                "switching to INTx mode. Please report this failure to "
6835                "the PCI maintainer and include system chipset information.\n",
6836                        tp->dev->name);
6837
6838         free_irq(tp->pdev->irq, dev);
6839         pci_disable_msi(tp->pdev);
6840
6841         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6842
6843         err = tg3_request_irq(tp);
6844         if (err)
6845                 return err;
6846
6847         /* Need to reset the chip because the MSI cycle may have terminated
6848          * with Master Abort.
6849          */
6850         tg3_full_lock(tp, 1);
6851
6852         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6853         err = tg3_init_hw(tp, 1);
6854
6855         tg3_full_unlock(tp);
6856
6857         if (err)
6858                 free_irq(tp->pdev->irq, dev);
6859
6860         return err;
6861 }
6862
6863 static int tg3_open(struct net_device *dev)
6864 {
6865         struct tg3 *tp = netdev_priv(dev);
6866         int err;
6867
6868         tg3_full_lock(tp, 0);
6869
6870         err = tg3_set_power_state(tp, PCI_D0);
6871         if (err) {
6872                 tg3_full_unlock(tp);
                     return err;
             }
6873
6874         tg3_disable_ints(tp);
6875         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6876
6877         tg3_full_unlock(tp);
6878
6879         /* The placement of this call is tied
6880          * to the setup and use of Host TX descriptors.
6881          */
6882         err = tg3_alloc_consistent(tp);
6883         if (err)
6884                 return err;
6885
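             /* Consider MSI only on 5750-class and newer chips, skipping the
              * 5750 AX/BX steppings as well as 5714 devices without a peer
              * function (tp->pdev_peer == tp->pdev, i.e. no second port found).
              */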
6886         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6887             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6888             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6889             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6890               (tp->pdev_peer == tp->pdev))) {
6891                 /* All MSI supporting chips should support tagged
6892                  * status.  Assert that this is the case.
6893                  */
6894                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6895                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6896                                "Not using MSI.\n", tp->dev->name);
6897                 } else if (pci_enable_msi(tp->pdev) == 0) {
6898                         u32 msi_mode;
6899
6900                         msi_mode = tr32(MSGINT_MODE);
6901                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6902                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6903                 }
6904         }
6905         err = tg3_request_irq(tp);
6906
6907         if (err) {
6908                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6909                         pci_disable_msi(tp->pdev);
6910                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6911                 }
6912                 tg3_free_consistent(tp);
6913                 return err;
6914         }
6915
6916         tg3_full_lock(tp, 0);
6917
6918         err = tg3_init_hw(tp, 1);
6919         if (err) {
6920                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6921                 tg3_free_rings(tp);
6922         } else {
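                     /* Tagged-status chips only need the once-per-second work
                      * in tg3_timer, so tick at 1Hz; everything else ticks at
                      * 10Hz for the non-tagged workaround.  The multipliers
                      * convert ticks into the 1-second stats work and the
                      * 2-second ASF heartbeat.
                      */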
6923                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6924                         tp->timer_offset = HZ;
6925                 else
6926                         tp->timer_offset = HZ / 10;
6927
6928                 BUG_ON(tp->timer_offset > HZ);
6929                 tp->timer_counter = tp->timer_multiplier =
6930                         (HZ / tp->timer_offset);
6931                 tp->asf_counter = tp->asf_multiplier =
6932                         ((HZ / tp->timer_offset) * 2);
6933
6934                 init_timer(&tp->timer);
6935                 tp->timer.expires = jiffies + tp->timer_offset;
6936                 tp->timer.data = (unsigned long) tp;
6937                 tp->timer.function = tg3_timer;
6938         }
6939
6940         tg3_full_unlock(tp);
6941
6942         if (err) {
6943                 free_irq(tp->pdev->irq, dev);
6944                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6945                         pci_disable_msi(tp->pdev);
6946                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6947                 }
6948                 tg3_free_consistent(tp);
6949                 return err;
6950         }
6951
6952         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6953                 err = tg3_test_msi(tp);
6954
6955                 if (err) {
6956                         tg3_full_lock(tp, 0);
6957
6958                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6959                                 pci_disable_msi(tp->pdev);
6960                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6961                         }
6962                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6963                         tg3_free_rings(tp);
6964                         tg3_free_consistent(tp);
6965
6966                         tg3_full_unlock(tp);
6967
6968                         return err;
6969                 }
6970
6971                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6972                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6973                                 u32 val = tr32(0x7c04);
6974
6975                                 tw32(0x7c04, val | (1 << 29));
6976                         }
6977                 }
6978         }
6979
6980         tg3_full_lock(tp, 0);
6981
6982         add_timer(&tp->timer);
6983         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6984         tg3_enable_ints(tp);
6985
6986         tg3_full_unlock(tp);
6987
6988         netif_start_queue(dev);
6989
6990         return 0;
6991 }
6992
6993 #if 0
6994 /*static*/ void tg3_dump_state(struct tg3 *tp)
6995 {
6996         u32 val32, val32_2, val32_3, val32_4, val32_5;
6997         u16 val16;
6998         int i;
6999
7000         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7001         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7002         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7003                val16, val32);
7004
7005         /* MAC block */
7006         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7007                tr32(MAC_MODE), tr32(MAC_STATUS));
7008         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7009                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7010         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7011                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7012         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7013                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7014
7015         /* Send data initiator control block */
7016         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7017                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7018         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7019                tr32(SNDDATAI_STATSCTRL));
7020
7021         /* Send data completion control block */
7022         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7023
7024         /* Send BD ring selector block */
7025         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7026                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7027
7028         /* Send BD initiator control block */
7029         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7030                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7031
7032         /* Send BD completion control block */
7033         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7034
7035         /* Receive list placement control block */
7036         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7037                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7038         printk("       RCVLPC_STATSCTRL[%08x]\n",
7039                tr32(RCVLPC_STATSCTRL));
7040
7041         /* Receive data and receive BD initiator control block */
7042         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7043                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7044
7045         /* Receive data completion control block */
7046         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7047                tr32(RCVDCC_MODE));
7048
7049         /* Receive BD initiator control block */
7050         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7051                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7052
7053         /* Receive BD completion control block */
7054         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7055                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7056
7057         /* Receive list selector control block */
7058         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7059                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7060
7061         /* Mbuf cluster free block */
7062         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7063                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7064
7065         /* Host coalescing control block */
7066         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7067                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7068         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7069                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7070                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7071         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7072                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7073                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7074         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7075                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7076         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7077                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7078
7079         /* Memory arbiter control block */
7080         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7081                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7082
7083         /* Buffer manager control block */
7084         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7085                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7086         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7087                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7088         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7089                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7090                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7091                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7092
7093         /* Read DMA control block */
7094         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7095                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7096
7097         /* Write DMA control block */
7098         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7099                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7100
7101         /* DMA completion block */
7102         printk("DEBUG: DMAC_MODE[%08x]\n",
7103                tr32(DMAC_MODE));
7104
7105         /* GRC block */
7106         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7107                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7108         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7109                tr32(GRC_LOCAL_CTRL));
7110
7111         /* TG3_BDINFOs */
7112         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7113                tr32(RCVDBDI_JUMBO_BD + 0x0),
7114                tr32(RCVDBDI_JUMBO_BD + 0x4),
7115                tr32(RCVDBDI_JUMBO_BD + 0x8),
7116                tr32(RCVDBDI_JUMBO_BD + 0xc));
7117         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7118                tr32(RCVDBDI_STD_BD + 0x0),
7119                tr32(RCVDBDI_STD_BD + 0x4),
7120                tr32(RCVDBDI_STD_BD + 0x8),
7121                tr32(RCVDBDI_STD_BD + 0xc));
7122         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7123                tr32(RCVDBDI_MINI_BD + 0x0),
7124                tr32(RCVDBDI_MINI_BD + 0x4),
7125                tr32(RCVDBDI_MINI_BD + 0x8),
7126                tr32(RCVDBDI_MINI_BD + 0xc));
7127
7128         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7129         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7130         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7131         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7132         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7133                val32, val32_2, val32_3, val32_4);
7134
7135         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7136         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7137         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7138         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7139         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7140                val32, val32_2, val32_3, val32_4);
7141
7142         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7143         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7144         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7145         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7146         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7147         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7148                val32, val32_2, val32_3, val32_4, val32_5);
7149
7150         /* SW status block */
7151         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7152                tp->hw_status->status,
7153                tp->hw_status->status_tag,
7154                tp->hw_status->rx_jumbo_consumer,
7155                tp->hw_status->rx_consumer,
7156                tp->hw_status->rx_mini_consumer,
7157                tp->hw_status->idx[0].rx_producer,
7158                tp->hw_status->idx[0].tx_consumer);
7159
7160         /* SW statistics block */
7161         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7162                ((u32 *)tp->hw_stats)[0],
7163                ((u32 *)tp->hw_stats)[1],
7164                ((u32 *)tp->hw_stats)[2],
7165                ((u32 *)tp->hw_stats)[3]);
7166
7167         /* Mailboxes */
7168         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7169                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7170                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7171                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7172                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7173
7174         /* NIC side send descriptors. */
7175         for (i = 0; i < 6; i++) {
7176                 unsigned long txd;
7177
7178                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7179                         + (i * sizeof(struct tg3_tx_buffer_desc));
7180                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7181                        i,
7182                        readl(txd + 0x0), readl(txd + 0x4),
7183                        readl(txd + 0x8), readl(txd + 0xc));
7184         }
7185
7186         /* NIC side RX descriptors. */
7187         for (i = 0; i < 6; i++) {
7188                 unsigned long rxd;
7189
7190                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7191                         + (i * sizeof(struct tg3_rx_buffer_desc));
7192                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7193                        i,
7194                        readl(rxd + 0x0), readl(rxd + 0x4),
7195                        readl(rxd + 0x8), readl(rxd + 0xc));
7196                 rxd += (4 * sizeof(u32));
7197                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7198                        i,
7199                        readl(rxd + 0x0), readl(rxd + 0x4),
7200                        readl(rxd + 0x8), readl(rxd + 0xc));
7201         }
7202
7203         for (i = 0; i < 6; i++) {
7204                 unsigned long rxd;
7205
7206                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7207                         + (i * sizeof(struct tg3_rx_buffer_desc));
7208                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7209                        i,
7210                        readl(rxd + 0x0), readl(rxd + 0x4),
7211                        readl(rxd + 0x8), readl(rxd + 0xc));
7212                 rxd += (4 * sizeof(u32));
7213                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7214                        i,
7215                        readl(rxd + 0x0), readl(rxd + 0x4),
7216                        readl(rxd + 0x8), readl(rxd + 0xc));
7217         }
7218 }
7219 #endif
7220
7221 static struct net_device_stats *tg3_get_stats(struct net_device *);
7222 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7223
7224 static int tg3_close(struct net_device *dev)
7225 {
7226         struct tg3 *tp = netdev_priv(dev);
7227
7228         /* Calling flush_scheduled_work() may deadlock because
7229          * linkwatch_event() may be on the workqueue and it will try to get
7230          * the rtnl_lock which we are holding.
7231          */
7232         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7233                 msleep(1);
7234
7235         netif_stop_queue(dev);
7236
7237         del_timer_sync(&tp->timer);
7238
7239         tg3_full_lock(tp, 1);
7240 #if 0
7241         tg3_dump_state(tp);
7242 #endif
7243
7244         tg3_disable_ints(tp);
7245
7246         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7247         tg3_free_rings(tp);
7248         tp->tg3_flags &=
7249                 ~(TG3_FLAG_INIT_COMPLETE |
7250                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7251
7252         tg3_full_unlock(tp);
7253
7254         free_irq(tp->pdev->irq, dev);
7255         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7256                 pci_disable_msi(tp->pdev);
7257                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7258         }
7259
7260         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7261                sizeof(tp->net_stats_prev));
7262         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7263                sizeof(tp->estats_prev));
7264
7265         tg3_free_consistent(tp);
7266
7267         tg3_set_power_state(tp, PCI_D3hot);
7268
7269         netif_carrier_off(tp->dev);
7270
7271         return 0;
7272 }
7273
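     /* Return a 64-bit hardware counter as an unsigned long: the full value
      * on 64-bit hosts, only the low 32 bits on 32-bit hosts.
      */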
7274 static inline unsigned long get_stat64(tg3_stat64_t *val)
7275 {
7276         unsigned long ret;
7277
7278 #if (BITS_PER_LONG == 32)
7279         ret = val->low;
7280 #else
7281         ret = ((u64)val->high << 32) | ((u64)val->low);
7282 #endif
7283         return ret;
7284 }
7285
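     /* 5700/5701 boards with a copper PHY keep the CRC error count in the
      * PHY rather than in the MAC statistics block; read it via PHY
      * registers 0x1e/0x14 and accumulate it in software.  Everything else
      * uses the rx_fcs_errors hardware counter.
      */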
7286 static unsigned long calc_crc_errors(struct tg3 *tp)
7287 {
7288         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7289
7290         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7291             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7292              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7293                 u32 val;
7294
7295                 spin_lock_bh(&tp->lock);
7296                 if (!tg3_readphy(tp, 0x1e, &val)) {
7297                         tg3_writephy(tp, 0x1e, val | 0x8000);
7298                         tg3_readphy(tp, 0x14, &val);
7299                 } else
7300                         val = 0;
7301                 spin_unlock_bh(&tp->lock);
7302
7303                 tp->phy_crc_errors += val;
7304
7305                 return tp->phy_crc_errors;
7306         }
7307
7308         return get_stat64(&hw_stats->rx_fcs_errors);
7309 }
7310
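     /* estats_prev holds the totals saved at the last tg3_close(); add the
      * live hardware counters on top so the ethtool statistics keep
      * counting across down/up cycles.
      */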
7311 #define ESTAT_ADD(member) \
7312         estats->member =        old_estats->member + \
7313                                 get_stat64(&hw_stats->member)
7314
7315 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7316 {
7317         struct tg3_ethtool_stats *estats = &tp->estats;
7318         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7319         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7320
7321         if (!hw_stats)
7322                 return old_estats;
7323
7324         ESTAT_ADD(rx_octets);
7325         ESTAT_ADD(rx_fragments);
7326         ESTAT_ADD(rx_ucast_packets);
7327         ESTAT_ADD(rx_mcast_packets);
7328         ESTAT_ADD(rx_bcast_packets);
7329         ESTAT_ADD(rx_fcs_errors);
7330         ESTAT_ADD(rx_align_errors);
7331         ESTAT_ADD(rx_xon_pause_rcvd);
7332         ESTAT_ADD(rx_xoff_pause_rcvd);
7333         ESTAT_ADD(rx_mac_ctrl_rcvd);
7334         ESTAT_ADD(rx_xoff_entered);
7335         ESTAT_ADD(rx_frame_too_long_errors);
7336         ESTAT_ADD(rx_jabbers);
7337         ESTAT_ADD(rx_undersize_packets);
7338         ESTAT_ADD(rx_in_length_errors);
7339         ESTAT_ADD(rx_out_length_errors);
7340         ESTAT_ADD(rx_64_or_less_octet_packets);
7341         ESTAT_ADD(rx_65_to_127_octet_packets);
7342         ESTAT_ADD(rx_128_to_255_octet_packets);
7343         ESTAT_ADD(rx_256_to_511_octet_packets);
7344         ESTAT_ADD(rx_512_to_1023_octet_packets);
7345         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7346         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7347         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7348         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7349         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7350
7351         ESTAT_ADD(tx_octets);
7352         ESTAT_ADD(tx_collisions);
7353         ESTAT_ADD(tx_xon_sent);
7354         ESTAT_ADD(tx_xoff_sent);
7355         ESTAT_ADD(tx_flow_control);
7356         ESTAT_ADD(tx_mac_errors);
7357         ESTAT_ADD(tx_single_collisions);
7358         ESTAT_ADD(tx_mult_collisions);
7359         ESTAT_ADD(tx_deferred);
7360         ESTAT_ADD(tx_excessive_collisions);
7361         ESTAT_ADD(tx_late_collisions);
7362         ESTAT_ADD(tx_collide_2times);
7363         ESTAT_ADD(tx_collide_3times);
7364         ESTAT_ADD(tx_collide_4times);
7365         ESTAT_ADD(tx_collide_5times);
7366         ESTAT_ADD(tx_collide_6times);
7367         ESTAT_ADD(tx_collide_7times);
7368         ESTAT_ADD(tx_collide_8times);
7369         ESTAT_ADD(tx_collide_9times);
7370         ESTAT_ADD(tx_collide_10times);
7371         ESTAT_ADD(tx_collide_11times);
7372         ESTAT_ADD(tx_collide_12times);
7373         ESTAT_ADD(tx_collide_13times);
7374         ESTAT_ADD(tx_collide_14times);
7375         ESTAT_ADD(tx_collide_15times);
7376         ESTAT_ADD(tx_ucast_packets);
7377         ESTAT_ADD(tx_mcast_packets);
7378         ESTAT_ADD(tx_bcast_packets);
7379         ESTAT_ADD(tx_carrier_sense_errors);
7380         ESTAT_ADD(tx_discards);
7381         ESTAT_ADD(tx_errors);
7382
7383         ESTAT_ADD(dma_writeq_full);
7384         ESTAT_ADD(dma_write_prioq_full);
7385         ESTAT_ADD(rxbds_empty);
7386         ESTAT_ADD(rx_discards);
7387         ESTAT_ADD(rx_errors);
7388         ESTAT_ADD(rx_threshold_hit);
7389
7390         ESTAT_ADD(dma_readq_full);
7391         ESTAT_ADD(dma_read_prioq_full);
7392         ESTAT_ADD(tx_comp_queue_full);
7393
7394         ESTAT_ADD(ring_set_send_prod_index);
7395         ESTAT_ADD(ring_status_update);
7396         ESTAT_ADD(nic_irqs);
7397         ESTAT_ADD(nic_avoided_irqs);
7398         ESTAT_ADD(nic_tx_threshold_hit);
7399
7400         return estats;
7401 }
7402
7403 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7404 {
7405         struct tg3 *tp = netdev_priv(dev);
7406         struct net_device_stats *stats = &tp->net_stats;
7407         struct net_device_stats *old_stats = &tp->net_stats_prev;
7408         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7409
7410         if (!hw_stats)
7411                 return old_stats;
7412
7413         stats->rx_packets = old_stats->rx_packets +
7414                 get_stat64(&hw_stats->rx_ucast_packets) +
7415                 get_stat64(&hw_stats->rx_mcast_packets) +
7416                 get_stat64(&hw_stats->rx_bcast_packets);
7417
7418         stats->tx_packets = old_stats->tx_packets +
7419                 get_stat64(&hw_stats->tx_ucast_packets) +
7420                 get_stat64(&hw_stats->tx_mcast_packets) +
7421                 get_stat64(&hw_stats->tx_bcast_packets);
7422
7423         stats->rx_bytes = old_stats->rx_bytes +
7424                 get_stat64(&hw_stats->rx_octets);
7425         stats->tx_bytes = old_stats->tx_bytes +
7426                 get_stat64(&hw_stats->tx_octets);
7427
7428         stats->rx_errors = old_stats->rx_errors +
7429                 get_stat64(&hw_stats->rx_errors);
7430         stats->tx_errors = old_stats->tx_errors +
7431                 get_stat64(&hw_stats->tx_errors) +
7432                 get_stat64(&hw_stats->tx_mac_errors) +
7433                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7434                 get_stat64(&hw_stats->tx_discards);
7435
7436         stats->multicast = old_stats->multicast +
7437                 get_stat64(&hw_stats->rx_mcast_packets);
7438         stats->collisions = old_stats->collisions +
7439                 get_stat64(&hw_stats->tx_collisions);
7440
7441         stats->rx_length_errors = old_stats->rx_length_errors +
7442                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7443                 get_stat64(&hw_stats->rx_undersize_packets);
7444
7445         stats->rx_over_errors = old_stats->rx_over_errors +
7446                 get_stat64(&hw_stats->rxbds_empty);
7447         stats->rx_frame_errors = old_stats->rx_frame_errors +
7448                 get_stat64(&hw_stats->rx_align_errors);
7449         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7450                 get_stat64(&hw_stats->tx_discards);
7451         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7452                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7453
7454         stats->rx_crc_errors = old_stats->rx_crc_errors +
7455                 calc_crc_errors(tp);
7456
7457         stats->rx_missed_errors = old_stats->rx_missed_errors +
7458                 get_stat64(&hw_stats->rx_discards);
7459
7460         return stats;
7461 }
7462
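     /* Bit-reversed (little-endian) CRC-32 over the buffer with the standard
      * Ethernet polynomial 0xedb88320 and a final inversion (roughly the
      * same value as ~ether_crc_le(len, buf) from <linux/crc32.h>).  Used
      * below to hash multicast addresses for the MAC_HASH_REG filter.
      */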
7463 static inline u32 calc_crc(unsigned char *buf, int len)
7464 {
7465         u32 reg;
7466         u32 tmp;
7467         int j, k;
7468
7469         reg = 0xffffffff;
7470
7471         for (j = 0; j < len; j++) {
7472                 reg ^= buf[j];
7473
7474                 for (k = 0; k < 8; k++) {
7475                         tmp = reg & 0x01;
7476
7477                         reg >>= 1;
7478
7479                         if (tmp) {
7480                                 reg ^= 0xedb88320;
7481                         }
7482                 }
7483         }
7484
7485         return ~reg;
7486 }
7487
7488 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7489 {
7490         /* accept or reject all multicast frames */
7491         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7492         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7493         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7494         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7495 }
7496
7497 static void __tg3_set_rx_mode(struct net_device *dev)
7498 {
7499         struct tg3 *tp = netdev_priv(dev);
7500         u32 rx_mode;
7501
7502         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7503                                   RX_MODE_KEEP_VLAN_TAG);
7504
7505         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7506          * flag clear.
7507          */
7508 #if TG3_VLAN_TAG_USED
7509         if (!tp->vlgrp &&
7510             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7511                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7512 #else
7513         /* By definition, VLAN is always disabled in this
7514          * case.
7515          */
7516         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7517                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7518 #endif
7519
7520         if (dev->flags & IFF_PROMISC) {
7521                 /* Promiscuous mode. */
7522                 rx_mode |= RX_MODE_PROMISC;
7523         } else if (dev->flags & IFF_ALLMULTI) {
7524                 /* Accept all multicast. */
7525                 tg3_set_multi(tp, 1);
7526         } else if (dev->mc_count < 1) {
7527                 /* Reject all multicast. */
7528                 tg3_set_multi(tp, 0);
7529         } else {
7530                 /* Accept one or more multicast(s). */
7531                 struct dev_mc_list *mclist;
7532                 unsigned int i;
7533                 u32 mc_filter[4] = { 0, };
7534                 u32 regidx;
7535                 u32 bit;
7536                 u32 crc;
7537
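                     /* Hash each address into one of 128 filter bits: the low
                      * 7 bits of the inverted CRC select the bit, with bits
                      * 6:5 picking one of the four hash registers and bits
                      * 4:0 the bit within it.
                      */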
7538                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7539                      i++, mclist = mclist->next) {
7540
7541                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7542                         bit = ~crc & 0x7f;
7543                         regidx = (bit & 0x60) >> 5;
7544                         bit &= 0x1f;
7545                         mc_filter[regidx] |= (1 << bit);
7546                 }
7547
7548                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7549                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7550                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7551                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7552         }
7553
7554         if (rx_mode != tp->rx_mode) {
7555                 tp->rx_mode = rx_mode;
7556                 tw32_f(MAC_RX_MODE, rx_mode);
7557                 udelay(10);
7558         }
7559 }
7560
7561 static void tg3_set_rx_mode(struct net_device *dev)
7562 {
7563         struct tg3 *tp = netdev_priv(dev);
7564
7565         if (!netif_running(dev))
7566                 return;
7567
7568         tg3_full_lock(tp, 0);
7569         __tg3_set_rx_mode(dev);
7570         tg3_full_unlock(tp);
7571 }
7572
7573 #define TG3_REGDUMP_LEN         (32 * 1024)
7574
7575 static int tg3_get_regs_len(struct net_device *dev)
7576 {
7577         return TG3_REGDUMP_LEN;
7578 }
7579
7580 static void tg3_get_regs(struct net_device *dev,
7581                 struct ethtool_regs *regs, void *_p)
7582 {
7583         u32 *p = _p;
7584         struct tg3 *tp = netdev_priv(dev);
7585         u8 *orig_p = _p;
7586         int i;
7587
7588         regs->version = 0;
7589
7590         memset(p, 0, TG3_REGDUMP_LEN);
7591
7592         if (tp->link_config.phy_is_low_power)
7593                 return;
7594
7595         tg3_full_lock(tp, 0);
7596
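     /* GET_REG32_LOOP copies len bytes of register space starting at base
      * into the dump buffer at the same offset, so the ethtool register
      * dump mirrors the chip's register layout; GET_REG32_1 grabs a single
      * register.
      */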
7597 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7598 #define GET_REG32_LOOP(base,len)                \
7599 do {    p = (u32 *)(orig_p + (base));           \
7600         for (i = 0; i < len; i += 4)            \
7601                 __GET_REG32((base) + i);        \
7602 } while (0)
7603 #define GET_REG32_1(reg)                        \
7604 do {    p = (u32 *)(orig_p + (reg));            \
7605         __GET_REG32((reg));                     \
7606 } while (0)
7607
7608         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7609         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7610         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7611         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7612         GET_REG32_1(SNDDATAC_MODE);
7613         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7614         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7615         GET_REG32_1(SNDBDC_MODE);
7616         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7617         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7618         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7619         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7620         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7621         GET_REG32_1(RCVDCC_MODE);
7622         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7623         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7624         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7625         GET_REG32_1(MBFREE_MODE);
7626         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7627         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7628         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7629         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7630         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7631         GET_REG32_1(RX_CPU_MODE);
7632         GET_REG32_1(RX_CPU_STATE);
7633         GET_REG32_1(RX_CPU_PGMCTR);
7634         GET_REG32_1(RX_CPU_HWBKPT);
7635         GET_REG32_1(TX_CPU_MODE);
7636         GET_REG32_1(TX_CPU_STATE);
7637         GET_REG32_1(TX_CPU_PGMCTR);
7638         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7639         GET_REG32_LOOP(FTQ_RESET, 0x120);
7640         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7641         GET_REG32_1(DMAC_MODE);
7642         GET_REG32_LOOP(GRC_MODE, 0x4c);
7643         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7644                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7645
7646 #undef __GET_REG32
7647 #undef GET_REG32_LOOP
7648 #undef GET_REG32_1
7649
7650         tg3_full_unlock(tp);
7651 }
7652
7653 static int tg3_get_eeprom_len(struct net_device *dev)
7654 {
7655         struct tg3 *tp = netdev_priv(dev);
7656
7657         return tp->nvram_size;
7658 }
7659
7660 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7661 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7662
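/*
 * Read 'eeprom->len' bytes of NVRAM starting at 'eeprom->offset' (typically
 * reached via "ethtool -e").  The device is read one 32-bit word at a time,
 * so the transfer is split into an unaligned head, a run of whole words and
 * an unaligned tail; each word is passed through cpu_to_le32() before the
 * byte-wise copy so partial words are extracted consistently.
 */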
7663 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7664 {
7665         struct tg3 *tp = netdev_priv(dev);
7666         int ret;
7667         u8  *pd;
7668         u32 i, offset, len, val, b_offset, b_count;
7669
7670         if (tp->link_config.phy_is_low_power)
7671                 return -EAGAIN;
7672
7673         offset = eeprom->offset;
7674         len = eeprom->len;
7675         eeprom->len = 0;
7676
7677         eeprom->magic = TG3_EEPROM_MAGIC;
7678
7679         if (offset & 3) {
7680                 /* adjustments to start on required 4 byte boundary */
7681                 b_offset = offset & 3;
7682                 b_count = 4 - b_offset;
7683                 if (b_count > len) {
7684                         /* i.e. offset=1 len=2 */
7685                         b_count = len;
7686                 }
7687                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7688                 if (ret)
7689                         return ret;
7690                 val = cpu_to_le32(val);
7691                 memcpy(data, ((char*)&val) + b_offset, b_count);
7692                 len -= b_count;
7693                 offset += b_count;
7694                 eeprom->len += b_count;
7695         }
7696
7697         /* read bytes up to the last 4 byte boundary */
7698         pd = &data[eeprom->len];
7699         for (i = 0; i < (len - (len & 3)); i += 4) {
7700                 ret = tg3_nvram_read(tp, offset + i, &val);
7701                 if (ret) {
7702                         eeprom->len += i;
7703                         return ret;
7704                 }
7705                 val = cpu_to_le32(val);
7706                 memcpy(pd + i, &val, 4);
7707         }
7708         eeprom->len += i;
7709
7710         if (len & 3) {
7711                 /* read last bytes not ending on 4 byte boundary */
7712                 pd = &data[eeprom->len];
7713                 b_count = len & 3;
7714                 b_offset = offset + len - b_count;
7715                 ret = tg3_nvram_read(tp, b_offset, &val);
7716                 if (ret)
7717                         return ret;
7718                 val = cpu_to_le32(val);
7719                 memcpy(pd, ((char*)&val), b_count);
7720                 eeprom->len += b_count;
7721         }
7722         return 0;
7723 }
7724
7725 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7726
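/*
 * Write 'eeprom->len' bytes at 'eeprom->offset'.  NVRAM is written in whole
 * 32-bit words, so an unaligned head or tail is widened to a 4-byte boundary
 * by first reading the overlapping words ('start'/'end') and splicing the
 * caller's bytes into a temporary bounce buffer before calling
 * tg3_nvram_write_block().
 */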
7727 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7728 {
7729         struct tg3 *tp = netdev_priv(dev);
7730         int ret;
7731         u32 offset, len, b_offset, odd_len, start, end;
7732         u8 *buf;
7733
7734         if (tp->link_config.phy_is_low_power)
7735                 return -EAGAIN;
7736
7737         if (eeprom->magic != TG3_EEPROM_MAGIC)
7738                 return -EINVAL;
7739
7740         offset = eeprom->offset;
7741         len = eeprom->len;
7742
7743         if ((b_offset = (offset & 3))) {
7744                 /* adjustments to start on required 4 byte boundary */
7745                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7746                 if (ret)
7747                         return ret;
7748                 start = cpu_to_le32(start);
7749                 len += b_offset;
7750                 offset &= ~3;
7751                 if (len < 4)
7752                         len = 4;
7753         }
7754
7755         odd_len = 0;
7756         if (len & 3) {
7757                 /* adjustments to end on required 4 byte boundary */
7758                 odd_len = 1;
7759                 len = (len + 3) & ~3;
7760                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7761                 if (ret)
7762                         return ret;
7763                 end = cpu_to_le32(end);
7764         }
7765
7766         buf = data;
7767         if (b_offset || odd_len) {
7768                 buf = kmalloc(len, GFP_KERNEL);
7769                 if (!buf)
7770                         return -ENOMEM;
7771                 if (b_offset)
7772                         memcpy(buf, &start, 4);
7773                 if (odd_len)
7774                         memcpy(buf+len-4, &end, 4);
7775                 memcpy(buf + b_offset, data, eeprom->len);
7776         }
7777
7778         ret = tg3_nvram_write_block(tp, offset, len, buf);
7779
7780         if (buf != data)
7781                 kfree(buf);
7782
7783         return ret;
7784 }
7785
7786 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7787 {
7788         struct tg3 *tp = netdev_priv(dev);
7789
7790         cmd->supported = (SUPPORTED_Autoneg);
7791
7792         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7793                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7794                                    SUPPORTED_1000baseT_Full);
7795
7796         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7797                 cmd->supported |= (SUPPORTED_100baseT_Half |
7798                                   SUPPORTED_100baseT_Full |
7799                                   SUPPORTED_10baseT_Half |
7800                                   SUPPORTED_10baseT_Full |
7801                                   SUPPORTED_MII);
7802                 cmd->port = PORT_TP;
7803         } else {
7804                 cmd->supported |= SUPPORTED_FIBRE;
7805                 cmd->port = PORT_FIBRE;
7806         }
7807
7808         cmd->advertising = tp->link_config.advertising;
7809         if (netif_running(dev)) {
7810                 cmd->speed = tp->link_config.active_speed;
7811                 cmd->duplex = tp->link_config.active_duplex;
7812         }
7813         cmd->phy_address = PHY_ADDR;
7814         cmd->transceiver = 0;
7815         cmd->autoneg = tp->link_config.autoneg;
7816         cmd->maxtxpkt = 0;
7817         cmd->maxrxpkt = 0;
7818         return 0;
7819 }
7820
7821 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7822 {
7823         struct tg3 *tp = netdev_priv(dev);
7824
7825         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7826                 /* These are the only valid advertisement bits allowed.  */
7827                 if (cmd->autoneg == AUTONEG_ENABLE &&
7828                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7829                                           ADVERTISED_1000baseT_Full |
7830                                           ADVERTISED_Autoneg |
7831                                           ADVERTISED_FIBRE)))
7832                         return -EINVAL;
7833                 /* Fiber can only do SPEED_1000.  */
7834                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7835                          (cmd->speed != SPEED_1000))
7836                         return -EINVAL;
7837         /* Copper cannot force SPEED_1000.  */
7838         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7839                    (cmd->speed == SPEED_1000))
7840                 return -EINVAL;
7841         else if ((cmd->speed == SPEED_1000) &&
7842                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7843                 return -EINVAL;
7844
7845         tg3_full_lock(tp, 0);
7846
7847         tp->link_config.autoneg = cmd->autoneg;
7848         if (cmd->autoneg == AUTONEG_ENABLE) {
7849                 tp->link_config.advertising = cmd->advertising;
7850                 tp->link_config.speed = SPEED_INVALID;
7851                 tp->link_config.duplex = DUPLEX_INVALID;
7852         } else {
7853                 tp->link_config.advertising = 0;
7854                 tp->link_config.speed = cmd->speed;
7855                 tp->link_config.duplex = cmd->duplex;
7856         }
7857
7858         if (netif_running(dev))
7859                 tg3_setup_phy(tp, 1);
7860
7861         tg3_full_unlock(tp);
7862
7863         return 0;
7864 }
7865
7866 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7867 {
7868         struct tg3 *tp = netdev_priv(dev);
7869
7870         strcpy(info->driver, DRV_MODULE_NAME);
7871         strcpy(info->version, DRV_MODULE_VERSION);
7872         strcpy(info->fw_version, tp->fw_ver);
7873         strcpy(info->bus_info, pci_name(tp->pdev));
7874 }
7875
7876 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7877 {
7878         struct tg3 *tp = netdev_priv(dev);
7879
7880         wol->supported = WAKE_MAGIC;
7881         wol->wolopts = 0;
7882         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7883                 wol->wolopts = WAKE_MAGIC;
7884         memset(&wol->sopass, 0, sizeof(wol->sopass));
7885 }
7886
7887 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7888 {
7889         struct tg3 *tp = netdev_priv(dev);
7890
7891         if (wol->wolopts & ~WAKE_MAGIC)
7892                 return -EINVAL;
7893         if ((wol->wolopts & WAKE_MAGIC) &&
7894             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
7895             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7896                 return -EINVAL;
7897
7898         spin_lock_bh(&tp->lock);
7899         if (wol->wolopts & WAKE_MAGIC)
7900                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7901         else
7902                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7903         spin_unlock_bh(&tp->lock);
7904
7905         return 0;
7906 }
7907
7908 static u32 tg3_get_msglevel(struct net_device *dev)
7909 {
7910         struct tg3 *tp = netdev_priv(dev);
7911         return tp->msg_enable;
7912 }
7913
7914 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7915 {
7916         struct tg3 *tp = netdev_priv(dev);
7917         tp->msg_enable = value;
7918 }
7919
7920 #if TG3_TSO_SUPPORT != 0
7921 static int tg3_set_tso(struct net_device *dev, u32 value)
7922 {
7923         struct tg3 *tp = netdev_priv(dev);
7924
7925         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7926                 if (value)
7927                         return -EINVAL;
7928                 return 0;
7929         }
7930         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
7931                 if (value)
7932                         dev->features |= NETIF_F_TSO6;
7933                 else
7934                         dev->features &= ~NETIF_F_TSO6;
7935         }
7936         return ethtool_op_set_tso(dev, value);
7937 }
7938 #endif
7939
7940 static int tg3_nway_reset(struct net_device *dev)
7941 {
7942         struct tg3 *tp = netdev_priv(dev);
7943         u32 bmcr;
7944         int r;
7945
7946         if (!netif_running(dev))
7947                 return -EAGAIN;
7948
7949         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7950                 return -EINVAL;
7951
7952         spin_lock_bh(&tp->lock);
7953         r = -EINVAL;
7954         tg3_readphy(tp, MII_BMCR, &bmcr);
7955         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7956             ((bmcr & BMCR_ANENABLE) ||
7957              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7958                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7959                                            BMCR_ANENABLE);
7960                 r = 0;
7961         }
7962         spin_unlock_bh(&tp->lock);
7963
7964         return r;
7965 }
7966
7967 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7968 {
7969         struct tg3 *tp = netdev_priv(dev);
7970
7971         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7972         ering->rx_mini_max_pending = 0;
7973         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7974                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7975         else
7976                 ering->rx_jumbo_max_pending = 0;
7977
7978         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7979
7980         ering->rx_pending = tp->rx_pending;
7981         ering->rx_mini_pending = 0;
7982         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7983                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7984         else
7985                 ering->rx_jumbo_pending = 0;
7986
7987         ering->tx_pending = tp->tx_pending;
7988 }
7989
7990 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7991 {
7992         struct tg3 *tp = netdev_priv(dev);
7993         int irq_sync = 0, err = 0;
7994
7995         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7996             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7997             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7998                 return -EINVAL;
7999
8000         if (netif_running(dev)) {
8001                 tg3_netif_stop(tp);
8002                 irq_sync = 1;
8003         }
8004
8005         tg3_full_lock(tp, irq_sync);
8006
8007         tp->rx_pending = ering->rx_pending;
8008
8009         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8010             tp->rx_pending > 63)
8011                 tp->rx_pending = 63;
8012         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8013         tp->tx_pending = ering->tx_pending;
8014
8015         if (netif_running(dev)) {
8016                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8017                 err = tg3_restart_hw(tp, 1);
8018                 if (!err)
8019                         tg3_netif_start(tp);
8020         }
8021
8022         tg3_full_unlock(tp);
8023
8024         return err;
8025 }
8026
8027 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8028 {
8029         struct tg3 *tp = netdev_priv(dev);
8030
8031         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8032         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8033         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8034 }
8035
8036 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8037 {
8038         struct tg3 *tp = netdev_priv(dev);
8039         int irq_sync = 0, err = 0;
8040
8041         if (netif_running(dev)) {
8042                 tg3_netif_stop(tp);
8043                 irq_sync = 1;
8044         }
8045
8046         tg3_full_lock(tp, irq_sync);
8047
8048         if (epause->autoneg)
8049                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8050         else
8051                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8052         if (epause->rx_pause)
8053                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8054         else
8055                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8056         if (epause->tx_pause)
8057                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8058         else
8059                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8060
8061         if (netif_running(dev)) {
8062                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8063                 err = tg3_restart_hw(tp, 1);
8064                 if (!err)
8065                         tg3_netif_start(tp);
8066         }
8067
8068         tg3_full_unlock(tp);
8069
8070         return err;
8071 }
8072
8073 static u32 tg3_get_rx_csum(struct net_device *dev)
8074 {
8075         struct tg3 *tp = netdev_priv(dev);
8076         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8077 }
8078
8079 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8080 {
8081         struct tg3 *tp = netdev_priv(dev);
8082
8083         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8084                 if (data != 0)
8085                         return -EINVAL;
8086                 return 0;
8087         }
8088
8089         spin_lock_bh(&tp->lock);
8090         if (data)
8091                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8092         else
8093                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8094         spin_unlock_bh(&tp->lock);
8095
8096         return 0;
8097 }
8098
8099 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8100 {
8101         struct tg3 *tp = netdev_priv(dev);
8102
8103         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8104                 if (data != 0)
8105                         return -EINVAL;
8106                 return 0;
8107         }
8108
8109         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8111                 ethtool_op_set_tx_hw_csum(dev, data);
8112         else
8113                 ethtool_op_set_tx_csum(dev, data);
8114
8115         return 0;
8116 }
8117
8118 static int tg3_get_stats_count (struct net_device *dev)
8119 {
8120         return TG3_NUM_STATS;
8121 }
8122
8123 static int tg3_get_test_count (struct net_device *dev)
8124 {
8125         return TG3_NUM_TEST;
8126 }
8127
8128 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8129 {
8130         switch (stringset) {
8131         case ETH_SS_STATS:
8132                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8133                 break;
8134         case ETH_SS_TEST:
8135                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8136                 break;
8137         default:
8138                 WARN_ON(1);     /* we need a WARN() */
8139                 break;
8140         }
8141 }
8142
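/*
 * "ethtool -p" identify helper: blink the LEDs for 'data' seconds (2 if the
 * caller passes 0), alternating every 500 ms between forcing all link/traffic
 * LEDs on and traffic-override only, then restore the saved tp->led_ctrl.
 */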
8143 static int tg3_phys_id(struct net_device *dev, u32 data)
8144 {
8145         struct tg3 *tp = netdev_priv(dev);
8146         int i;
8147
8148         if (!netif_running(tp->dev))
8149                 return -EAGAIN;
8150
8151         if (data == 0)
8152                 data = 2;
8153
8154         for (i = 0; i < (data * 2); i++) {
8155                 if ((i % 2) == 0)
8156                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8157                                            LED_CTRL_1000MBPS_ON |
8158                                            LED_CTRL_100MBPS_ON |
8159                                            LED_CTRL_10MBPS_ON |
8160                                            LED_CTRL_TRAFFIC_OVERRIDE |
8161                                            LED_CTRL_TRAFFIC_BLINK |
8162                                            LED_CTRL_TRAFFIC_LED);
8163
8164                 else
8165                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8166                                            LED_CTRL_TRAFFIC_OVERRIDE);
8167
8168                 if (msleep_interruptible(500))
8169                         break;
8170         }
8171         tw32(MAC_LED_CTRL, tp->led_ctrl);
8172         return 0;
8173 }
8174
8175 static void tg3_get_ethtool_stats (struct net_device *dev,
8176                                    struct ethtool_stats *estats, u64 *tmp_stats)
8177 {
8178         struct tg3 *tp = netdev_priv(dev);
8179         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8180 }
8181
8182 #define NVRAM_TEST_SIZE 0x100
8183 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8184
8185 static int tg3_test_nvram(struct tg3 *tp)
8186 {
8187         u32 *buf, csum, magic;
8188         int i, j, err = 0, size;
8189
8190         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8191                 return -EIO;
8192
8193         if (magic == TG3_EEPROM_MAGIC)
8194                 size = NVRAM_TEST_SIZE;
8195         else if ((magic & 0xff000000) == 0xa5000000) {
8196                 if ((magic & 0xe00000) == 0x200000)
8197                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8198                 else
8199                         return 0;
8200         } else
8201                 return -EIO;
8202
8203         buf = kmalloc(size, GFP_KERNEL);
8204         if (buf == NULL)
8205                 return -ENOMEM;
8206
8207         err = -EIO;
8208         for (i = 0, j = 0; i < size; i += 4, j++) {
8209                 u32 val;
8210
8211                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8212                         break;
8213                 buf[j] = cpu_to_le32(val);
8214         }
8215         if (i < size)
8216                 goto out;
8217
8218         /* Selfboot format */
8219         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8220                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8221
8222                 for (i = 0; i < size; i++)
8223                         csum8 += buf8[i];
8224
8225                 if (csum8 == 0) {
8226                         err = 0;
8227                         goto out;
8228                 }
8229
8230                 err = -EIO;
8231                 goto out;
8232         }
8233
8234         /* Bootstrap checksum at offset 0x10 */
8235         csum = calc_crc((unsigned char *) buf, 0x10);
8236         if (csum != cpu_to_le32(buf[0x10/4]))
8237                 goto out;
8238
8239         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8240         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8241         if (csum != cpu_to_le32(buf[0xfc/4]))
8242                 goto out;
8243
8244         err = 0;
8245
8246 out:
8247         kfree(buf);
8248         return err;
8249 }
8250
8251 #define TG3_SERDES_TIMEOUT_SEC  2
8252 #define TG3_COPPER_TIMEOUT_SEC  6
8253
8254 static int tg3_test_link(struct tg3 *tp)
8255 {
8256         int i, max;
8257
8258         if (!netif_running(tp->dev))
8259                 return -ENODEV;
8260
8261         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8262                 max = TG3_SERDES_TIMEOUT_SEC;
8263         else
8264                 max = TG3_COPPER_TIMEOUT_SEC;
8265
8266         for (i = 0; i < max; i++) {
8267                 if (netif_carrier_ok(tp->dev))
8268                         return 0;
8269
8270                 if (msleep_interruptible(1000))
8271                         break;
8272         }
8273
8274         return -EIO;
8275 }
8276
8277 /* Only test the commonly used registers */
8278 static int tg3_test_registers(struct tg3 *tp)
8279 {
8280         int i, is_5705;
8281         u32 offset, read_mask, write_mask, val, save_val, read_val;
8282         static struct {
8283                 u16 offset;
8284                 u16 flags;
8285 #define TG3_FL_5705     0x1
8286 #define TG3_FL_NOT_5705 0x2
8287 #define TG3_FL_NOT_5788 0x4
8288                 u32 read_mask;
8289                 u32 write_mask;
8290         } reg_tbl[] = {
8291                 /* MAC Control Registers */
8292                 { MAC_MODE, TG3_FL_NOT_5705,
8293                         0x00000000, 0x00ef6f8c },
8294                 { MAC_MODE, TG3_FL_5705,
8295                         0x00000000, 0x01ef6b8c },
8296                 { MAC_STATUS, TG3_FL_NOT_5705,
8297                         0x03800107, 0x00000000 },
8298                 { MAC_STATUS, TG3_FL_5705,
8299                         0x03800100, 0x00000000 },
8300                 { MAC_ADDR_0_HIGH, 0x0000,
8301                         0x00000000, 0x0000ffff },
8302                 { MAC_ADDR_0_LOW, 0x0000,
8303                         0x00000000, 0xffffffff },
8304                 { MAC_RX_MTU_SIZE, 0x0000,
8305                         0x00000000, 0x0000ffff },
8306                 { MAC_TX_MODE, 0x0000,
8307                         0x00000000, 0x00000070 },
8308                 { MAC_TX_LENGTHS, 0x0000,
8309                         0x00000000, 0x00003fff },
8310                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8311                         0x00000000, 0x000007fc },
8312                 { MAC_RX_MODE, TG3_FL_5705,
8313                         0x00000000, 0x000007dc },
8314                 { MAC_HASH_REG_0, 0x0000,
8315                         0x00000000, 0xffffffff },
8316                 { MAC_HASH_REG_1, 0x0000,
8317                         0x00000000, 0xffffffff },
8318                 { MAC_HASH_REG_2, 0x0000,
8319                         0x00000000, 0xffffffff },
8320                 { MAC_HASH_REG_3, 0x0000,
8321                         0x00000000, 0xffffffff },
8322
8323                 /* Receive Data and Receive BD Initiator Control Registers. */
8324                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8325                         0x00000000, 0xffffffff },
8326                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8327                         0x00000000, 0xffffffff },
8328                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8329                         0x00000000, 0x00000003 },
8330                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8331                         0x00000000, 0xffffffff },
8332                 { RCVDBDI_STD_BD+0, 0x0000,
8333                         0x00000000, 0xffffffff },
8334                 { RCVDBDI_STD_BD+4, 0x0000,
8335                         0x00000000, 0xffffffff },
8336                 { RCVDBDI_STD_BD+8, 0x0000,
8337                         0x00000000, 0xffff0002 },
8338                 { RCVDBDI_STD_BD+0xc, 0x0000,
8339                         0x00000000, 0xffffffff },
8340
8341                 /* Receive BD Initiator Control Registers. */
8342                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8343                         0x00000000, 0xffffffff },
8344                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8345                         0x00000000, 0x000003ff },
8346                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8347                         0x00000000, 0xffffffff },
8348
8349                 /* Host Coalescing Control Registers. */
8350                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8351                         0x00000000, 0x00000004 },
8352                 { HOSTCC_MODE, TG3_FL_5705,
8353                         0x00000000, 0x000000f6 },
8354                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8355                         0x00000000, 0xffffffff },
8356                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8357                         0x00000000, 0x000003ff },
8358                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8359                         0x00000000, 0xffffffff },
8360                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8361                         0x00000000, 0x000003ff },
8362                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8363                         0x00000000, 0xffffffff },
8364                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8365                         0x00000000, 0x000000ff },
8366                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8367                         0x00000000, 0xffffffff },
8368                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8369                         0x00000000, 0x000000ff },
8370                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8371                         0x00000000, 0xffffffff },
8372                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8373                         0x00000000, 0xffffffff },
8374                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8375                         0x00000000, 0xffffffff },
8376                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8377                         0x00000000, 0x000000ff },
8378                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8379                         0x00000000, 0xffffffff },
8380                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8381                         0x00000000, 0x000000ff },
8382                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8383                         0x00000000, 0xffffffff },
8384                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8385                         0x00000000, 0xffffffff },
8386                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8387                         0x00000000, 0xffffffff },
8388                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8389                         0x00000000, 0xffffffff },
8390                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8391                         0x00000000, 0xffffffff },
8392                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8393                         0xffffffff, 0x00000000 },
8394                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8395                         0xffffffff, 0x00000000 },
8396
8397                 /* Buffer Manager Control Registers. */
8398                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8399                         0x00000000, 0x007fff80 },
8400                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8401                         0x00000000, 0x007fffff },
8402                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8403                         0x00000000, 0x0000003f },
8404                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8405                         0x00000000, 0x000001ff },
8406                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8407                         0x00000000, 0x000001ff },
8408                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8409                         0xffffffff, 0x00000000 },
8410                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8411                         0xffffffff, 0x00000000 },
8412
8413                 /* Mailbox Registers */
8414                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8415                         0x00000000, 0x000001ff },
8416                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8417                         0x00000000, 0x000001ff },
8418                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8419                         0x00000000, 0x000007ff },
8420                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8421                         0x00000000, 0x000001ff },
8422
8423                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8424         };
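        /*
         * For each table entry, read_mask marks the read-only bits whose
         * value must survive writes, while write_mask marks the read/write
         * bits that must accept both all-zeros and all-ones.  Entries are
         * filtered by the TG3_FL_* flags so only registers valid for this
         * chip generation are exercised.
         */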
8425
8426         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8427                 is_5705 = 1;
8428         else
8429                 is_5705 = 0;
8430
8431         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8432                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8433                         continue;
8434
8435                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8436                         continue;
8437
8438                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8439                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8440                         continue;
8441
8442                 offset = (u32) reg_tbl[i].offset;
8443                 read_mask = reg_tbl[i].read_mask;
8444                 write_mask = reg_tbl[i].write_mask;
8445
8446                 /* Save the original register content */
8447                 save_val = tr32(offset);
8448
8449                 /* Determine the read-only value. */
8450                 read_val = save_val & read_mask;
8451
8452                 /* Write zero to the register, then make sure the read-only bits
8453                  * are not changed and the read/write bits are all zeros.
8454                  */
8455                 tw32(offset, 0);
8456
8457                 val = tr32(offset);
8458
8459                 /* Test the read-only and read/write bits. */
8460                 if (((val & read_mask) != read_val) || (val & write_mask))
8461                         goto out;
8462
8463                 /* Write ones to all the bits defined by RdMask and WrMask, then
8464                  * make sure the read-only bits are not changed and the
8465                  * read/write bits are all ones.
8466                  */
8467                 tw32(offset, read_mask | write_mask);
8468
8469                 val = tr32(offset);
8470
8471                 /* Test the read-only bits. */
8472                 if ((val & read_mask) != read_val)
8473                         goto out;
8474
8475                 /* Test the read/write bits. */
8476                 if ((val & write_mask) != write_mask)
8477                         goto out;
8478
8479                 tw32(offset, save_val);
8480         }
8481
8482         return 0;
8483
8484 out:
8485         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8486         tw32(offset, save_val);
8487         return -EIO;
8488 }
8489
8490 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8491 {
8492         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8493         int i;
8494         u32 j;
8495
8496         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8497                 for (j = 0; j < len; j += 4) {
8498                         u32 val;
8499
8500                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8501                         tg3_read_mem(tp, offset + j, &val);
8502                         if (val != test_pattern[i])
8503                                 return -EIO;
8504                 }
8505         }
8506         return 0;
8507 }
8508
8509 static int tg3_test_memory(struct tg3 *tp)
8510 {
8511         static struct mem_entry {
8512                 u32 offset;
8513                 u32 len;
8514         } mem_tbl_570x[] = {
8515                 { 0x00000000, 0x00b50},
8516                 { 0x00002000, 0x1c000},
8517                 { 0xffffffff, 0x00000}
8518         }, mem_tbl_5705[] = {
8519                 { 0x00000100, 0x0000c},
8520                 { 0x00000200, 0x00008},
8521                 { 0x00004000, 0x00800},
8522                 { 0x00006000, 0x01000},
8523                 { 0x00008000, 0x02000},
8524                 { 0x00010000, 0x0e000},
8525                 { 0xffffffff, 0x00000}
8526         }, mem_tbl_5755[] = {
8527                 { 0x00000200, 0x00008},
8528                 { 0x00004000, 0x00800},
8529                 { 0x00006000, 0x00800},
8530                 { 0x00008000, 0x02000},
8531                 { 0x00010000, 0x0c000},
8532                 { 0xffffffff, 0x00000}
8533         };
8534         struct mem_entry *mem_tbl;
8535         int err = 0;
8536         int i;
8537
8538         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8539                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8540                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8541                         mem_tbl = mem_tbl_5755;
8542                 else
8543                         mem_tbl = mem_tbl_5705;
8544         } else
8545                 mem_tbl = mem_tbl_570x;
8546
8547         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8548                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8549                     mem_tbl[i].len)) != 0)
8550                         break;
8551         }
8552
8553         return err;
8554 }
8555
8556 #define TG3_MAC_LOOPBACK        0
8557 #define TG3_PHY_LOOPBACK        1
8558
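/*
 * Single-packet loopback: configure either internal MAC loopback or PHY
 * loopback, build a 1514-byte frame addressed to ourselves with a known
 * byte pattern, post one TX descriptor and kick the send mailbox, then poll
 * the status block (up to roughly 250 usec) for the TX completion and a new
 * RX producer entry before comparing the received payload byte for byte.
 */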
8559 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8560 {
8561         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8562         u32 desc_idx;
8563         struct sk_buff *skb, *rx_skb;
8564         u8 *tx_data;
8565         dma_addr_t map;
8566         int num_pkts, tx_len, rx_len, i, err;
8567         struct tg3_rx_buffer_desc *desc;
8568
8569         if (loopback_mode == TG3_MAC_LOOPBACK) {
8570                 /* HW errata - mac loopback fails in some cases on 5780.
8571                  * Normal traffic and PHY loopback are not affected by
8572                  * errata.
8573                  */
8574                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8575                         return 0;
8576
8577                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8578                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8579                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8580                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8581                 else
8582                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8583                 tw32(MAC_MODE, mac_mode);
8584         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8585                 u32 val;
8586
8587                 val = BMCR_LOOPBACK | BMCR_FULLDPLX;
8588                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8589                         val |= BMCR_SPEED100;
8590                 else
8591                         val |= BMCR_SPEED1000;
8592
8593                 tg3_writephy(tp, MII_BMCR, val);
8594                 udelay(40);
8595                 /* reset to prevent losing 1st rx packet intermittently */
8596                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8597                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8598                         udelay(10);
8599                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8600                 }
8601                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8602                            MAC_MODE_LINK_POLARITY;
8603                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8604                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8605                 else
8606                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8607                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8608                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8609                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8610                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8611                 }
8612                 tw32(MAC_MODE, mac_mode);
8613         }
8614         else
8615                 return -EINVAL;
8616
8617         err = -EIO;
8618
8619         tx_len = 1514;
8620         skb = netdev_alloc_skb(tp->dev, tx_len);
8621         if (!skb)
8622                 return -ENOMEM;
8623
8624         tx_data = skb_put(skb, tx_len);
8625         memcpy(tx_data, tp->dev->dev_addr, 6);
8626         memset(tx_data + 6, 0x0, 8);
8627
8628         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8629
8630         for (i = 14; i < tx_len; i++)
8631                 tx_data[i] = (u8) (i & 0xff);
8632
8633         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8634
8635         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8636              HOSTCC_MODE_NOW);
8637
8638         udelay(10);
8639
8640         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8641
8642         num_pkts = 0;
8643
8644         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8645
8646         tp->tx_prod++;
8647         num_pkts++;
8648
8649         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8650                      tp->tx_prod);
8651         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8652
8653         udelay(10);
8654
8655         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
8656         for (i = 0; i < 25; i++) {
8657                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8658                        HOSTCC_MODE_NOW);
8659
8660                 udelay(10);
8661
8662                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8663                 rx_idx = tp->hw_status->idx[0].rx_producer;
8664                 if ((tx_idx == tp->tx_prod) &&
8665                     (rx_idx == (rx_start_idx + num_pkts)))
8666                         break;
8667         }
8668
8669         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8670         dev_kfree_skb(skb);
8671
8672         if (tx_idx != tp->tx_prod)
8673                 goto out;
8674
8675         if (rx_idx != rx_start_idx + num_pkts)
8676                 goto out;
8677
8678         desc = &tp->rx_rcb[rx_start_idx];
8679         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8680         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8681         if (opaque_key != RXD_OPAQUE_RING_STD)
8682                 goto out;
8683
8684         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8685             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8686                 goto out;
8687
8688         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8689         if (rx_len != tx_len)
8690                 goto out;
8691
8692         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8693
8694         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8695         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8696
8697         for (i = 14; i < tx_len; i++) {
8698                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8699                         goto out;
8700         }
8701         err = 0;
8702
8703         /* tg3_free_rings will unmap and free the rx_skb */
8704 out:
8705         return err;
8706 }
8707
8708 #define TG3_MAC_LOOPBACK_FAILED         1
8709 #define TG3_PHY_LOOPBACK_FAILED         2
8710 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8711                                          TG3_PHY_LOOPBACK_FAILED)
8712
8713 static int tg3_test_loopback(struct tg3 *tp)
8714 {
8715         int err = 0;
8716
8717         if (!netif_running(tp->dev))
8718                 return TG3_LOOPBACK_FAILED;
8719
8720         err = tg3_reset_hw(tp, 1);
8721         if (err)
8722                 return TG3_LOOPBACK_FAILED;
8723
8724         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8725                 err |= TG3_MAC_LOOPBACK_FAILED;
8726         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8727                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8728                         err |= TG3_PHY_LOOPBACK_FAILED;
8729         }
8730
8731         return err;
8732 }
8733
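/*
 * ethtool self-test entry point.  Results are reported in data[]: [0] NVRAM,
 * [1] link, [2] registers, [3] memory, [4] loopback (a mask of
 * TG3_*_LOOPBACK_FAILED bits) and [5] interrupt.  The offline tests ([2]
 * onwards) run only when ETH_TEST_FL_OFFLINE is set and require halting and
 * restarting the chip around them.
 */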
8734 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8735                           u64 *data)
8736 {
8737         struct tg3 *tp = netdev_priv(dev);
8738
8739         if (tp->link_config.phy_is_low_power)
8740                 tg3_set_power_state(tp, PCI_D0);
8741
8742         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8743
8744         if (tg3_test_nvram(tp) != 0) {
8745                 etest->flags |= ETH_TEST_FL_FAILED;
8746                 data[0] = 1;
8747         }
8748         if (tg3_test_link(tp) != 0) {
8749                 etest->flags |= ETH_TEST_FL_FAILED;
8750                 data[1] = 1;
8751         }
8752         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8753                 int err, irq_sync = 0;
8754
8755                 if (netif_running(dev)) {
8756                         tg3_netif_stop(tp);
8757                         irq_sync = 1;
8758                 }
8759
8760                 tg3_full_lock(tp, irq_sync);
8761
8762                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8763                 err = tg3_nvram_lock(tp);
8764                 tg3_halt_cpu(tp, RX_CPU_BASE);
8765                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8766                         tg3_halt_cpu(tp, TX_CPU_BASE);
8767                 if (!err)
8768                         tg3_nvram_unlock(tp);
8769
8770                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8771                         tg3_phy_reset(tp);
8772
8773                 if (tg3_test_registers(tp) != 0) {
8774                         etest->flags |= ETH_TEST_FL_FAILED;
8775                         data[2] = 1;
8776                 }
8777                 if (tg3_test_memory(tp) != 0) {
8778                         etest->flags |= ETH_TEST_FL_FAILED;
8779                         data[3] = 1;
8780                 }
8781                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8782                         etest->flags |= ETH_TEST_FL_FAILED;
8783
8784                 tg3_full_unlock(tp);
8785
8786                 if (tg3_test_interrupt(tp) != 0) {
8787                         etest->flags |= ETH_TEST_FL_FAILED;
8788                         data[5] = 1;
8789                 }
8790
8791                 tg3_full_lock(tp, 0);
8792
8793                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8794                 if (netif_running(dev)) {
8795                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8796                         if (!tg3_restart_hw(tp, 1))
8797                                 tg3_netif_start(tp);
8798                 }
8799
8800                 tg3_full_unlock(tp);
8801         }
8802         if (tp->link_config.phy_is_low_power)
8803                 tg3_set_power_state(tp, PCI_D3hot);
8804
8805 }
8806
8807 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8808 {
8809         struct mii_ioctl_data *data = if_mii(ifr);
8810         struct tg3 *tp = netdev_priv(dev);
8811         int err;
8812
8813         switch (cmd) {
8814         case SIOCGMIIPHY:
8815                 data->phy_id = PHY_ADDR;
8816
8817                 /* fallthru */
8818         case SIOCGMIIREG: {
8819                 u32 mii_regval;
8820
8821                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8822                         break;                  /* We have no PHY */
8823
8824                 if (tp->link_config.phy_is_low_power)
8825                         return -EAGAIN;
8826
8827                 spin_lock_bh(&tp->lock);
8828                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8829                 spin_unlock_bh(&tp->lock);
8830
8831                 data->val_out = mii_regval;
8832
8833                 return err;
8834         }
8835
8836         case SIOCSMIIREG:
8837                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8838                         break;                  /* We have no PHY */
8839
8840                 if (!capable(CAP_NET_ADMIN))
8841                         return -EPERM;
8842
8843                 if (tp->link_config.phy_is_low_power)
8844                         return -EAGAIN;
8845
8846                 spin_lock_bh(&tp->lock);
8847                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8848                 spin_unlock_bh(&tp->lock);
8849
8850                 return err;
8851
8852         default:
8853                 /* do nothing */
8854                 break;
8855         }
8856         return -EOPNOTSUPP;
8857 }
8858
8859 #if TG3_VLAN_TAG_USED
8860 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8861 {
8862         struct tg3 *tp = netdev_priv(dev);
8863
8864         if (netif_running(dev))
8865                 tg3_netif_stop(tp);
8866
8867         tg3_full_lock(tp, 0);
8868
8869         tp->vlgrp = grp;
8870
8871         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8872         __tg3_set_rx_mode(dev);
8873
8874         tg3_full_unlock(tp);
8875
8876         if (netif_running(dev))
8877                 tg3_netif_start(tp);
8878 }
8879
8880 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8881 {
8882         struct tg3 *tp = netdev_priv(dev);
8883
8884         if (netif_running(dev))
8885                 tg3_netif_stop(tp);
8886
8887         tg3_full_lock(tp, 0);
8888         if (tp->vlgrp)
8889                 tp->vlgrp->vlan_devices[vid] = NULL;
8890         tg3_full_unlock(tp);
8891
8892         if (netif_running(dev))
8893                 tg3_netif_start(tp);
8894 }
8895 #endif
8896
8897 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8898 {
8899         struct tg3 *tp = netdev_priv(dev);
8900
8901         memcpy(ec, &tp->coal, sizeof(*ec));
8902         return 0;
8903 }
8904
8905 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8906 {
8907         struct tg3 *tp = netdev_priv(dev);
8908         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8909         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8910
8911         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8912                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8913                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8914                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8915                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8916         }
8917
8918         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8919             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8920             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8921             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8922             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8923             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8924             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8925             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8926             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8927             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8928                 return -EINVAL;
8929
8930         /* No rx interrupts will be generated if both are zero */
8931         if ((ec->rx_coalesce_usecs == 0) &&
8932             (ec->rx_max_coalesced_frames == 0))
8933                 return -EINVAL;
8934
8935         /* No tx interrupts will be generated if both are zero */
8936         if ((ec->tx_coalesce_usecs == 0) &&
8937             (ec->tx_max_coalesced_frames == 0))
8938                 return -EINVAL;
8939
8940         /* Only copy relevant parameters, ignore all others. */
8941         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8942         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8943         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8944         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8945         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8946         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8947         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8948         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8949         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8950
8951         if (netif_running(dev)) {
8952                 tg3_full_lock(tp, 0);
8953                 __tg3_set_coalesce(tp, &tp->coal);
8954                 tg3_full_unlock(tp);
8955         }
8956         return 0;
8957 }
8958
8959 static const struct ethtool_ops tg3_ethtool_ops = {
8960         .get_settings           = tg3_get_settings,
8961         .set_settings           = tg3_set_settings,
8962         .get_drvinfo            = tg3_get_drvinfo,
8963         .get_regs_len           = tg3_get_regs_len,
8964         .get_regs               = tg3_get_regs,
8965         .get_wol                = tg3_get_wol,
8966         .set_wol                = tg3_set_wol,
8967         .get_msglevel           = tg3_get_msglevel,
8968         .set_msglevel           = tg3_set_msglevel,
8969         .nway_reset             = tg3_nway_reset,
8970         .get_link               = ethtool_op_get_link,
8971         .get_eeprom_len         = tg3_get_eeprom_len,
8972         .get_eeprom             = tg3_get_eeprom,
8973         .set_eeprom             = tg3_set_eeprom,
8974         .get_ringparam          = tg3_get_ringparam,
8975         .set_ringparam          = tg3_set_ringparam,
8976         .get_pauseparam         = tg3_get_pauseparam,
8977         .set_pauseparam         = tg3_set_pauseparam,
8978         .get_rx_csum            = tg3_get_rx_csum,
8979         .set_rx_csum            = tg3_set_rx_csum,
8980         .get_tx_csum            = ethtool_op_get_tx_csum,
8981         .set_tx_csum            = tg3_set_tx_csum,
8982         .get_sg                 = ethtool_op_get_sg,
8983         .set_sg                 = ethtool_op_set_sg,
8984 #if TG3_TSO_SUPPORT != 0
8985         .get_tso                = ethtool_op_get_tso,
8986         .set_tso                = tg3_set_tso,
8987 #endif
8988         .self_test_count        = tg3_get_test_count,
8989         .self_test              = tg3_self_test,
8990         .get_strings            = tg3_get_strings,
8991         .phys_id                = tg3_phys_id,
8992         .get_stats_count        = tg3_get_stats_count,
8993         .get_ethtool_stats      = tg3_get_ethtool_stats,
8994         .get_coalesce           = tg3_get_coalesce,
8995         .set_coalesce           = tg3_set_coalesce,
8996         .get_perm_addr          = ethtool_op_get_perm_addr,
8997 };
8998
8999 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9000 {
9001         u32 cursize, val, magic;
9002
9003         tp->nvram_size = EEPROM_CHIP_SIZE;
9004
9005         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9006                 return;
9007
9008         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9009                 return;
9010
9011         /*
9012          * Size the chip by reading offsets at increasing powers of two.
9013          * When we encounter our validation signature, we know the addressing
9014          * has wrapped around, and thus have our chip size.
9015          */
9016         cursize = 0x10;
9017
9018         while (cursize < tp->nvram_size) {
9019                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9020                         return;
9021
9022                 if (val == magic)
9023                         break;
9024
9025                 cursize <<= 1;
9026         }
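        /*
         * cursize is now either the lowest power-of-two offset at which the
         * magic word read back (the wrap-around point, and hence the part
         * size) or the EEPROM_CHIP_SIZE upper bound.  For example
         * (illustrative only), a 512-byte part would wrap at offset 0x200.
         */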
9027
9028         tp->nvram_size = cursize;
9029 }
9030
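/*
 * Determine total NVRAM size.  Selfboot images (no standard magic at offset
 * 0) fall back to the wrap-around probe in tg3_get_eeprom_size().  Standard
 * images keep the size, in kilobytes, in the upper 16 bits of the word at
 * offset 0xf0; if that word is zero a 128 kB (0x20000) default is assumed.
 */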
9031 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9032 {
9033         u32 val;
9034
9035         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9036                 return;
9037
9038         /* Selfboot format */
9039         if (val != TG3_EEPROM_MAGIC) {
9040                 tg3_get_eeprom_size(tp);
9041                 return;
9042         }
9043
9044         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9045                 if (val != 0) {
9046                         tp->nvram_size = (val >> 16) * 1024;
9047                         return;
9048                 }
9049         }
9050         tp->nvram_size = 0x20000;
9051 }
9052
9053 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9054 {
9055         u32 nvcfg1;
9056
9057         nvcfg1 = tr32(NVRAM_CFG1);
9058         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9059                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9060         }
9061         else {
9062                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9063                 tw32(NVRAM_CFG1, nvcfg1);
9064         }
9065
9066         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9067             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9068                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9069                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9070                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9071                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9072                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9073                                 break;
9074                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9075                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9076                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9077                                 break;
9078                         case FLASH_VENDOR_ATMEL_EEPROM:
9079                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9080                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9081                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9082                                 break;
9083                         case FLASH_VENDOR_ST:
9084                                 tp->nvram_jedecnum = JEDEC_ST;
9085                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9086                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9087                                 break;
9088                         case FLASH_VENDOR_SAIFUN:
9089                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9090                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9091                                 break;
9092                         case FLASH_VENDOR_SST_SMALL:
9093                         case FLASH_VENDOR_SST_LARGE:
9094                                 tp->nvram_jedecnum = JEDEC_SST;
9095                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9096                                 break;
9097                 }
9098         }
9099         else {
9100                 tp->nvram_jedecnum = JEDEC_ATMEL;
9101                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9102                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9103         }
9104 }
9105
9106 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9107 {
9108         u32 nvcfg1;
9109
9110         nvcfg1 = tr32(NVRAM_CFG1);
9111
9112         /* NVRAM protection for TPM */
9113         if (nvcfg1 & (1 << 27))
9114                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9115
9116         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9117                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9118                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9119                         tp->nvram_jedecnum = JEDEC_ATMEL;
9120                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9121                         break;
9122                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9123                         tp->nvram_jedecnum = JEDEC_ATMEL;
9124                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9125                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9126                         break;
9127                 case FLASH_5752VENDOR_ST_M45PE10:
9128                 case FLASH_5752VENDOR_ST_M45PE20:
9129                 case FLASH_5752VENDOR_ST_M45PE40:
9130                         tp->nvram_jedecnum = JEDEC_ST;
9131                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9132                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9133                         break;
9134         }
9135
9136         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9137                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9138                         case FLASH_5752PAGE_SIZE_256:
9139                                 tp->nvram_pagesize = 256;
9140                                 break;
9141                         case FLASH_5752PAGE_SIZE_512:
9142                                 tp->nvram_pagesize = 512;
9143                                 break;
9144                         case FLASH_5752PAGE_SIZE_1K:
9145                                 tp->nvram_pagesize = 1024;
9146                                 break;
9147                         case FLASH_5752PAGE_SIZE_2K:
9148                                 tp->nvram_pagesize = 2048;
9149                                 break;
9150                         case FLASH_5752PAGE_SIZE_4K:
9151                                 tp->nvram_pagesize = 4096;
9152                                 break;
9153                         case FLASH_5752PAGE_SIZE_264:
9154                                 tp->nvram_pagesize = 264;
9155                                 break;
9156                 }
9157         }
9158         else {
9159                 /* For eeprom, set pagesize to maximum eeprom size */
9160                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9161
9162                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9163                 tw32(NVRAM_CFG1, nvcfg1);
9164         }
9165 }
9166
9167 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9168 {
9169         u32 nvcfg1;
9170
9171         nvcfg1 = tr32(NVRAM_CFG1);
9172
9173         /* NVRAM protection for TPM */
9174         if (nvcfg1 & (1 << 27))
9175                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9176
9177         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9178                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9179                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9180                         tp->nvram_jedecnum = JEDEC_ATMEL;
9181                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9182                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9183
9184                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9185                         tw32(NVRAM_CFG1, nvcfg1);
9186                         break;
9187                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9188                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9189                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9190                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9191                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9192                         tp->nvram_jedecnum = JEDEC_ATMEL;
9193                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9194                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9195                         tp->nvram_pagesize = 264;
9196                         break;
9197                 case FLASH_5752VENDOR_ST_M45PE10:
9198                 case FLASH_5752VENDOR_ST_M45PE20:
9199                 case FLASH_5752VENDOR_ST_M45PE40:
9200                         tp->nvram_jedecnum = JEDEC_ST;
9201                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9202                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9203                         tp->nvram_pagesize = 256;
9204                         break;
9205         }
9206 }
9207
9208 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9209 {
9210         u32 nvcfg1;
9211
9212         nvcfg1 = tr32(NVRAM_CFG1);
9213
9214         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9215                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9216                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9217                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9218                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9219                         tp->nvram_jedecnum = JEDEC_ATMEL;
9220                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9221                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9222
9223                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9224                         tw32(NVRAM_CFG1, nvcfg1);
9225                         break;
9226                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9227                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9228                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9229                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9230                         tp->nvram_jedecnum = JEDEC_ATMEL;
9231                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9232                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9233                         tp->nvram_pagesize = 264;
9234                         break;
9235                 case FLASH_5752VENDOR_ST_M45PE10:
9236                 case FLASH_5752VENDOR_ST_M45PE20:
9237                 case FLASH_5752VENDOR_ST_M45PE40:
9238                         tp->nvram_jedecnum = JEDEC_ST;
9239                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9240                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9241                         tp->nvram_pagesize = 256;
9242                         break;
9243         }
9244 }
9245
9246 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9247 static void __devinit tg3_nvram_init(struct tg3 *tp)
9248 {
9249         int j;
9250
9251         tw32_f(GRC_EEPROM_ADDR,
9252              (EEPROM_ADDR_FSM_RESET |
9253               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9254                EEPROM_ADDR_CLKPERD_SHIFT)));
9255
9256         /* XXX schedule_timeout() ... */
9257         for (j = 0; j < 100; j++)
9258                 udelay(10);
9259
9260         /* Enable seeprom accesses. */
9261         tw32_f(GRC_LOCAL_CTRL,
9262              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9263         udelay(100);
9264
9265         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9266             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9267                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9268
9269                 if (tg3_nvram_lock(tp)) {
9270                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9271                                "tg3_nvram_init failed.\n", tp->dev->name);
9272                         return;
9273                 }
9274                 tg3_enable_nvram_access(tp);
9275
9276                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9277                         tg3_get_5752_nvram_info(tp);
9278                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9279                         tg3_get_5755_nvram_info(tp);
9280                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9281                         tg3_get_5787_nvram_info(tp);
9282                 else
9283                         tg3_get_nvram_info(tp);
9284
9285                 tg3_get_nvram_size(tp);
9286
9287                 tg3_disable_nvram_access(tp);
9288                 tg3_nvram_unlock(tp);
9289
9290         } else {
9291                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9292
9293                 tg3_get_eeprom_size(tp);
9294         }
9295 }
9296
9297 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9298                                         u32 offset, u32 *val)
9299 {
9300         u32 tmp;
9301         int i;
9302
9303         if (offset > EEPROM_ADDR_ADDR_MASK ||
9304             (offset % 4) != 0)
9305                 return -EINVAL;
9306
9307         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9308                                         EEPROM_ADDR_DEVID_MASK |
9309                                         EEPROM_ADDR_READ);
9310         tw32(GRC_EEPROM_ADDR,
9311              tmp |
9312              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9313              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9314               EEPROM_ADDR_ADDR_MASK) |
9315              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9316
9317         for (i = 0; i < 10000; i++) {
9318                 tmp = tr32(GRC_EEPROM_ADDR);
9319
9320                 if (tmp & EEPROM_ADDR_COMPLETE)
9321                         break;
9322                 udelay(100);
9323         }
9324         if (!(tmp & EEPROM_ADDR_COMPLETE))
9325                 return -EBUSY;
9326
9327         *val = tr32(GRC_EEPROM_DATA);
9328         return 0;
9329 }
9330
9331 #define NVRAM_CMD_TIMEOUT 10000
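/* Worst case, tg3_nvram_exec_cmd() below polls NVRAM_CMD_DONE for up to
 * NVRAM_CMD_TIMEOUT iterations of udelay(10), i.e. roughly
 * 10000 * 10us = 100ms, before giving up with -EBUSY.
 */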
9332
9333 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9334 {
9335         int i;
9336
9337         tw32(NVRAM_CMD, nvram_cmd);
9338         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9339                 udelay(10);
9340                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9341                         udelay(10);
9342                         break;
9343                 }
9344         }
9345         if (i == NVRAM_CMD_TIMEOUT) {
9346                 return -EBUSY;
9347         }
9348         return 0;
9349 }
9350
9351 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9352 {
9353         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9354             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9355             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9356             (tp->nvram_jedecnum == JEDEC_ATMEL))
9357
9358                 addr = ((addr / tp->nvram_pagesize) <<
9359                         ATMEL_AT45DB0X1B_PAGE_POS) +
9360                        (addr % tp->nvram_pagesize);
9361
9362         return addr;
9363 }
9364
9365 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9366 {
9367         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9368             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9369             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9370             (tp->nvram_jedecnum == JEDEC_ATMEL))
9371
9372                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9373                         tp->nvram_pagesize) +
9374                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9375
9376         return addr;
9377 }
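
/* Worked example for the two address mappings above (illustrative only;
 * it assumes the usual ATMEL_AT45DB0X1B_PAGE_POS of 9 and a 264-byte
 * nvram_pagesize for buffered Atmel flash):
 *
 *      linear 530  -> page 2, byte 2 -> phys   = (2 << 9) + 2 = 0x402
 *      phys 0x402  -> page 2, byte 2 -> linear = 2 * 264 + 2  = 530
 *
 * i.e. tg3_nvram_logical_addr() is the inverse of tg3_nvram_phys_addr()
 * for parts whose page size is not a power of two.
 */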
9378
9379 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9380 {
9381         int ret;
9382
9383         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9384                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9385
9386         offset = tg3_nvram_phys_addr(tp, offset);
9387
9388         if (offset > NVRAM_ADDR_MSK)
9389                 return -EINVAL;
9390
9391         ret = tg3_nvram_lock(tp);
9392         if (ret)
9393                 return ret;
9394
9395         tg3_enable_nvram_access(tp);
9396
9397         tw32(NVRAM_ADDR, offset);
9398         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9399                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9400
9401         if (ret == 0)
9402                 *val = swab32(tr32(NVRAM_RDDATA));
9403
9404         tg3_disable_nvram_access(tp);
9405
9406         tg3_nvram_unlock(tp);
9407
9408         return ret;
9409 }
9410
9411 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9412 {
9413         int err;
9414         u32 tmp;
9415
9416         err = tg3_nvram_read(tp, offset, &tmp);
9417         *val = swab32(tmp);
9418         return err;
9419 }
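
/* Read-path usage sketch (mirrors tg3_get_nvram_size() above):
 *
 *      u32 val;
 *
 *      if (tg3_nvram_read_swab(tp, 0, &val) == 0 &&
 *          val == TG3_EEPROM_MAGIC)
 *              ...  image carries the standard signature  ...
 *
 * tg3_nvram_read() returns the swab32()'d NVRAM_RDDATA word; the _swab
 * variant applies a second swab32() so callers can compare the result
 * directly against constants such as TG3_EEPROM_MAGIC.
 */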
9420
9421 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9422                                     u32 offset, u32 len, u8 *buf)
9423 {
9424         int i, j, rc = 0;
9425         u32 val;
9426
9427         for (i = 0; i < len; i += 4) {
9428                 u32 addr, data;
9429
9430                 addr = offset + i;
9431
9432                 memcpy(&data, buf + i, 4);
9433
9434                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9435
9436                 val = tr32(GRC_EEPROM_ADDR);
9437                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9438
9439                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9440                         EEPROM_ADDR_READ);
9441                 tw32(GRC_EEPROM_ADDR, val |
9442                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9443                         (addr & EEPROM_ADDR_ADDR_MASK) |
9444                         EEPROM_ADDR_START |
9445                         EEPROM_ADDR_WRITE);
9446
9447                 for (j = 0; j < 10000; j++) {
9448                         val = tr32(GRC_EEPROM_ADDR);
9449
9450                         if (val & EEPROM_ADDR_COMPLETE)
9451                                 break;
9452                         udelay(100);
9453                 }
9454                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9455                         rc = -EBUSY;
9456                         break;
9457                 }
9458         }
9459
9460         return rc;
9461 }
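
/* Per-word handshake used above: the data is staged in GRC_EEPROM_DATA,
 * EEPROM_ADDR_COMPLETE is written back (presumably acknowledging the
 * previous completion), the write is kicked off with EEPROM_ADDR_START |
 * EEPROM_ADDR_WRITE, and COMPLETE is then polled for up to
 * 10000 * 100us = 1s per 32-bit word before giving up with -EBUSY.
 */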
9462
9463 /* offset and length are dword aligned */
9464 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9465                 u8 *buf)
9466 {
9467         int ret = 0;
9468         u32 pagesize = tp->nvram_pagesize;
9469         u32 pagemask = pagesize - 1;
9470         u32 nvram_cmd;
9471         u8 *tmp;
9472
9473         tmp = kmalloc(pagesize, GFP_KERNEL);
9474         if (tmp == NULL)
9475                 return -ENOMEM;
9476
9477         while (len) {
9478                 int j;
9479                 u32 phy_addr, page_off, size;
9480
9481                 phy_addr = offset & ~pagemask;
9482
9483                 for (j = 0; j < pagesize; j += 4) {
9484                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9485                                                 (u32 *) (tmp + j))))
9486                                 break;
9487                 }
9488                 if (ret)
9489                         break;
9490
9491                 page_off = offset & pagemask;
9492                 size = pagesize;
9493                 if (len < size)
9494                         size = len;
9495
9496                 len -= size;
9497
9498                 memcpy(tmp + page_off, buf, size);
9499
9500                 offset = offset + (pagesize - page_off);
9501
9502                 tg3_enable_nvram_access(tp);
9503
9504                 /*
9505                  * Before we can erase the flash page, we need
9506                  * to issue a special "write enable" command.
9507                  */
9508                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9509
9510                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9511                         break;
9512
9513                 /* Erase the target page */
9514                 tw32(NVRAM_ADDR, phy_addr);
9515
9516                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9517                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9518
9519                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9520                         break;
9521
9522                 /* Issue another write enable to start the write. */
9523                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9524
9525                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9526                         break;
9527
9528                 for (j = 0; j < pagesize; j += 4) {
9529                         u32 data;
9530
9531                         data = *((u32 *) (tmp + j));
9532                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9533
9534                         tw32(NVRAM_ADDR, phy_addr + j);
9535
9536                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9537                                 NVRAM_CMD_WR;
9538
9539                         if (j == 0)
9540                                 nvram_cmd |= NVRAM_CMD_FIRST;
9541                         else if (j == (pagesize - 4))
9542                                 nvram_cmd |= NVRAM_CMD_LAST;
9543
9544                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9545                                 break;
9546                 }
9547                 if (ret)
9548                         break;
9549         }
9550
9551         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9552         tg3_nvram_exec_cmd(tp, nvram_cmd);
9553
9554         kfree(tmp);
9555
9556         return ret;
9557 }
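
/* Summary of the unbuffered (page-erase) path above, per flash page:
 *
 *   1. read the whole page into a bounce buffer with tg3_nvram_read(),
 *   2. merge the caller's data into that buffer,
 *   3. WREN, erase the page, then WREN again,
 *   4. program the page back one dword at a time, tagging the first
 *      dword with NVRAM_CMD_FIRST and the last with NVRAM_CMD_LAST,
 *   5. finish with WRDI to drop write enable.
 *
 * The bounce buffer is needed because the erase granularity is a full
 * page rather than a dword.
 */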
9558
9559 /* offset and length are dword aligned */
9560 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9561                 u8 *buf)
9562 {
9563         int i, ret = 0;
9564
9565         for (i = 0; i < len; i += 4, offset += 4) {
9566                 u32 data, page_off, phy_addr, nvram_cmd;
9567
9568                 memcpy(&data, buf + i, 4);
9569                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9570
9571                 page_off = offset % tp->nvram_pagesize;
9572
9573                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9574
9575                 tw32(NVRAM_ADDR, phy_addr);
9576
9577                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9578
9579                 if ((page_off == 0) || (i == 0))
9580                         nvram_cmd |= NVRAM_CMD_FIRST;
9581                 if (page_off == (tp->nvram_pagesize - 4))
9582                         nvram_cmd |= NVRAM_CMD_LAST;
9583
9584                 if (i == (len - 4))
9585                         nvram_cmd |= NVRAM_CMD_LAST;
9586
9587                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9588                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9589                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9590                     (tp->nvram_jedecnum == JEDEC_ST) &&
9591                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9592
9593                         if ((ret = tg3_nvram_exec_cmd(tp,
9594                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9595                                 NVRAM_CMD_DONE)))
9596
9597                                 break;
9598                 }
9599                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9600                         /* We always do complete word writes to eeprom. */
9601                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9602                 }
9603
9604                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9605                         break;
9606         }
9607         return ret;
9608 }
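
/* The buffered path above needs no erase: each dword write is tagged
 * NVRAM_CMD_FIRST at a page boundary (or for the very first dword) and
 * NVRAM_CMD_LAST at the end of a page or of the whole transfer,
 * presumably so the controller can delimit page programming cycles.
 * Plain EEPROMs always get FIRST | LAST, i.e. standalone word writes,
 * and ST parts (except on 5752/5755/5787) get an extra WREN before
 * each FIRST write.
 */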
9609
9610 /* offset and length are dword aligned */
9611 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9612 {
9613         int ret;
9614
9615         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9616                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9617                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9618                 udelay(40);
9619         }
9620
9621         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9622                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9623         }
9624         else {
9625                 u32 grc_mode;
9626
9627                 ret = tg3_nvram_lock(tp);
9628                 if (ret)
9629                         return ret;
9630
9631                 tg3_enable_nvram_access(tp);
9632                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9633                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9634                         tw32(NVRAM_WRITE1, 0x406);
9635
9636                 grc_mode = tr32(GRC_MODE);
9637                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9638
9639                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9640                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9641
9642                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9643                                 buf);
9644                 }
9645                 else {
9646                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9647                                 buf);
9648                 }
9649
9650                 grc_mode = tr32(GRC_MODE);
9651                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9652
9653                 tg3_disable_nvram_access(tp);
9654                 tg3_nvram_unlock(tp);
9655         }
9656
9657         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9658                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9659                 udelay(40);
9660         }
9661
9662         return ret;
9663 }
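
/* Caller-side sketch (illustrative; in this driver the ethtool
 * set_eeprom path is the typical user):
 *
 *      u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *      int err;
 *
 *      err = tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
 *
 * offset and len must stay dword aligned, as noted above; the helper
 * handles the write-protect GPIO, NVRAM locking and the choice between
 * the buffered and unbuffered programming paths.
 */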
9664
9665 struct subsys_tbl_ent {
9666         u16 subsys_vendor, subsys_devid;
9667         u32 phy_id;
9668 };
9669
9670 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9671         /* Broadcom boards. */
9672         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9673         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9674         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9675         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9676         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9677         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9678         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9679         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9680         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9681         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9682         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9683
9684         /* 3com boards. */
9685         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9686         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9687         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9688         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9689         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9690
9691         /* DELL boards. */
9692         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9693         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9694         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9695         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9696
9697         /* Compaq boards. */
9698         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9699         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9700         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9701         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9702         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9703
9704         /* IBM boards. */
9705         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9706 };
9707
9708 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9709 {
9710         int i;
9711
9712         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9713                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9714                      tp->pdev->subsystem_vendor) &&
9715                     (subsys_id_to_phy_id[i].subsys_devid ==
9716                      tp->pdev->subsystem_device))
9717                         return &subsys_id_to_phy_id[i];
9718         }
9719         return NULL;
9720 }
9721
9722 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9723 {
9724         u32 val;
9725         u16 pmcsr;
9726
9727         /* On some early chips the SRAM cannot be accessed in D3hot state,
9728          * so we need to make sure we're in D0.
9729          */
9730         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9731         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9732         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9733         msleep(1);
9734
9735         /* Make sure register accesses (indirect or otherwise)
9736          * will function correctly.
9737          */
9738         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9739                                tp->misc_host_ctrl);
9740
9741         /* The memory arbiter has to be enabled in order for SRAM accesses
9742          * to succeed.  Normally on powerup the tg3 chip firmware will make
9743          * sure it is enabled, but other entities such as system netboot
9744          * code might disable it.
9745          */
9746         val = tr32(MEMARB_MODE);
9747         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9748
9749         tp->phy_id = PHY_ID_INVALID;
9750         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9751
9752         /* Assume an onboard device by default.  */
9753         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9754
9755         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9756         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9757                 u32 nic_cfg, led_cfg;
9758                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9759                 int eeprom_phy_serdes = 0;
9760
9761                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9762                 tp->nic_sram_data_cfg = nic_cfg;
9763
9764                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9765                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9766                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9767                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9768                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9769                     (ver > 0) && (ver < 0x100))
9770                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9771
9772                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9773                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9774                         eeprom_phy_serdes = 1;
9775
9776                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9777                 if (nic_phy_id != 0) {
9778                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9779                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9780
9781                         eeprom_phy_id  = (id1 >> 16) << 10;
9782                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9783                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9784                 } else
9785                         eeprom_phy_id = 0;
9786
9787                 tp->phy_id = eeprom_phy_id;
9788                 if (eeprom_phy_serdes) {
9789                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9790                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9791                         else
9792                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9793                 }
9794
9795                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9796                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9797                                     SHASTA_EXT_LED_MODE_MASK);
9798                 else
9799                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9800
9801                 switch (led_cfg) {
9802                 default:
9803                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9804                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9805                         break;
9806
9807                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9808                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9809                         break;
9810
9811                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9812                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9813
9814                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9815                          * read, as happens with some older 5700/5701 bootcode.
9816                          */
9817                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9818                             ASIC_REV_5700 ||
9819                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9820                             ASIC_REV_5701)
9821                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9822
9823                         break;
9824
9825                 case SHASTA_EXT_LED_SHARED:
9826                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9827                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9828                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9829                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9830                                                  LED_CTRL_MODE_PHY_2);
9831                         break;
9832
9833                 case SHASTA_EXT_LED_MAC:
9834                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9835                         break;
9836
9837                 case SHASTA_EXT_LED_COMBO:
9838                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9839                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9840                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9841                                                  LED_CTRL_MODE_PHY_2);
9842                         break;
9843
9844                 }
9845
9846                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9847                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9848                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9849                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9850
9851                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9852                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9853                 else
9854                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9855
9856                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9857                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9858                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9859                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9860                 }
9861                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9862                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9863
9864                 if (cfg2 & (1 << 17))
9865                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9866
9867                 /* SerDes signal pre-emphasis in register 0x590 is set by
9868                  * the bootcode if bit 18 is set. */
9869                 if (cfg2 & (1 << 18))
9870                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9871         }
9872 }
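
/* Note on the PHY ID packing in tg3_get_eeprom_hw_cfg() above: as the
 * shifts and masks show, NIC_SRAM_DATA_PHY_ID keeps one half of the ID
 * in the upper 16 bits (id1) and the other half in the lower 16 bits
 * (id2), and
 *
 *      eeprom_phy_id  = (id1 >> 16) << 10;
 *      eeprom_phy_id |= (id2 & 0xfc00) << 16;
 *      eeprom_phy_id |= (id2 & 0x03ff) <<  0;
 *
 * rebuilds the same layout that tg3_phy_probe() below assembles from
 * MII_PHYSID1/MII_PHYSID2, so both values can be checked against the
 * PHY_ID_* constants and PHY_ID_MASK.
 */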
9873
9874 static int __devinit tg3_phy_probe(struct tg3 *tp)
9875 {
9876         u32 hw_phy_id_1, hw_phy_id_2;
9877         u32 hw_phy_id, hw_phy_id_masked;
9878         int err;
9879
9880         /* Reading the PHY ID register can conflict with ASF
9881          * firmware access to the PHY hardware.
9882          */
9883         err = 0;
9884         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9885                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9886         } else {
9887                 /* Now read the physical PHY_ID from the chip and verify
9888                  * that it is sane.  If it doesn't look good, we fall back
9889                  * to the PHY ID found in the eeprom area and, failing that,
9890                  * to the hard-coded subsystem-ID table.
9891                  */
9892                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9893                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9894
9895                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9896                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9897                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9898
9899                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9900         }
9901
9902         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9903                 tp->phy_id = hw_phy_id;
9904                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9905                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9906                 else
9907                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9908         } else {
9909                 if (tp->phy_id != PHY_ID_INVALID) {
9910                         /* Do nothing, phy ID already set up in
9911                          * tg3_get_eeprom_hw_cfg().
9912                          */
9913                 } else {
9914                         struct subsys_tbl_ent *p;
9915
9916                         /* No eeprom signature?  Try the hardcoded
9917                          * subsys device table.
9918                          */
9919                         p = lookup_by_subsys(tp);
9920                         if (!p)
9921                                 return -ENODEV;
9922
9923                         tp->phy_id = p->phy_id;
9924                         if (!tp->phy_id ||
9925                             tp->phy_id == PHY_ID_BCM8002)
9926                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9927                 }
9928         }
9929
9930         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9931             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9932                 u32 bmsr, adv_reg, tg3_ctrl;
9933
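                /* BMSR latches link-down events, so it is read twice;
                 * the second read reflects the current link state.
                 */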
9934                 tg3_readphy(tp, MII_BMSR, &bmsr);
9935                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9936                     (bmsr & BMSR_LSTATUS))
9937                         goto skip_phy_reset;
9938
9939                 err = tg3_phy_reset(tp);
9940                 if (err)
9941                         return err;
9942
9943                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9944                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9945                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9946                 tg3_ctrl = 0;
9947                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9948                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9949                                     MII_TG3_CTRL_ADV_1000_FULL);
9950                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9951                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9952                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9953                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9954                 }
9955
9956                 if (!tg3_copper_is_advertising_all(tp)) {
9957                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9958
9959                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9960                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9961
9962                         tg3_writephy(tp, MII_BMCR,
9963                                      BMCR_ANENABLE | BMCR_ANRESTART);
9964                 }
9965                 tg3_phy_set_wirespeed(tp);
9966
9967                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9968                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9969                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9970         }
9971
9972 skip_phy_reset:
9973         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9974                 err = tg3_init_5401phy_dsp(tp);
9975                 if (err)
9976                         return err;
9977         }
9978
9979         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9980                 err = tg3_init_5401phy_dsp(tp);
9981         }
9982
9983         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9984                 tp->link_config.advertising =
9985                         (ADVERTISED_1000baseT_Half |
9986                          ADVERTISED_1000baseT_Full |
9987                          ADVERTISED_Autoneg |
9988                          ADVERTISED_FIBRE);
9989         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9990                 tp->link_config.advertising &=
9991                         ~(ADVERTISED_1000baseT_Half |
9992                           ADVERTISED_1000baseT_Full);
9993
9994         return err;
9995 }
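
/* Probe order used above: the live MII_PHYSID1/2 value wins when it
 * decodes to a known PHY (and ASF firmware is not using the PHY);
 * otherwise the ID that tg3_get_eeprom_hw_cfg() pulled from NVRAM is
 * kept; only if neither is usable does the hard-coded
 * subsys_id_to_phy_id[] table decide, with -ENODEV as the last resort.
 */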
9996
9997 static void __devinit tg3_read_partno(struct tg3 *tp)
9998 {
9999         unsigned char vpd_data[256];
10000         int i;
10001         u32 magic;
10002
10003         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10004                 goto out_not_found;
10005
10006         if (magic == TG3_EEPROM_MAGIC) {
10007                 for (i = 0; i < 256; i += 4) {
10008                         u32 tmp;
10009
10010                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10011                                 goto out_not_found;
10012
10013                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10014                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10015                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10016                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10017                 }
10018         } else {
10019                 int vpd_cap;
10020
10021                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10022                 for (i = 0; i < 256; i += 4) {
10023                         u32 tmp, j = 0;
10024                         u16 tmp16;
10025
10026                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10027                                               i);
10028                         while (j++ < 100) {
10029                                 pci_read_config_word(tp->pdev, vpd_cap +
10030                                                      PCI_VPD_ADDR, &tmp16);
10031                                 if (tmp16 & 0x8000)
10032                                         break;
10033                                 msleep(1);
10034                         }
10035                         if (!(tmp16 & 0x8000))
10036                                 goto out_not_found;
10037
10038                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10039                                               &tmp);
10040                         tmp = cpu_to_le32(tmp);
10041                         memcpy(&vpd_data[i], &tmp, 4);
10042                 }
10043         }
10044
10045         /* Now parse and find the part number. */
10046         for (i = 0; i < 256; ) {
10047                 unsigned char val = vpd_data[i];
10048                 int block_end;
10049
10050                 if (val == 0x82 || val == 0x91) {
10051                         i = (i + 3 +
10052                              (vpd_data[i + 1] +
10053                               (vpd_data[i + 2] << 8)));
10054                         continue;
10055                 }
10056
10057                 if (val != 0x90)
10058                         goto out_not_found;
10059
10060                 block_end = (i + 3 +
10061                              (vpd_data[i + 1] +
10062                               (vpd_data[i + 2] << 8)));
10063                 i += 3;
10064                 while (i < block_end) {
10065                         if (vpd_data[i + 0] == 'P' &&
10066                             vpd_data[i + 1] == 'N') {
10067                                 int partno_len = vpd_data[i + 2];
10068
10069                                 if (partno_len > 24)
10070                                         goto out_not_found;
10071
10072                                 memcpy(tp->board_part_number,
10073                                        &vpd_data[i + 3],
10074                                        partno_len);
10075
10076                                 /* Success. */
10077                                 return;
10078                         }
                        /* Advance past this keyword entry (2-byte keyword,
                         * 1-byte length, then the data) so a read-only block
                         * whose first entry is not "PN" cannot loop forever.
                         */
                        i += 3 + vpd_data[i + 2];
10079                 }
10080
10081                 /* Part number not found. */
10082                 goto out_not_found;
10083         }
10084
10085 out_not_found:
10086         strcpy(tp->board_part_number, "none");
10087 }
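
/* VPD framing assumed by the parser above (standard PCI VPD layout):
 * 0x82 (identifier string) and 0x91 (read/write block) large-resource
 * tags are skipped, 0x90 introduces the read-only block, and inside it
 * each entry is a 2-byte keyword, a 1-byte length and the data bytes,
 * e.g. (bytes purely illustrative):
 *
 *      'P' 'N' 0x07 'B' 'C' 'M' '9' '5' '7' '0'
 *
 * Only the "PN" (part number) keyword is consumed here.
 */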
10088
10089 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10090 {
10091         u32 val, offset, start;
10092
10093         if (tg3_nvram_read_swab(tp, 0, &val))
10094                 return;
10095
10096         if (val != TG3_EEPROM_MAGIC)
10097                 return;
10098
10099         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10100             tg3_nvram_read_swab(tp, 0x4, &start))
10101                 return;
10102
10103         offset = tg3_nvram_logical_addr(tp, offset);
10104         if (tg3_nvram_read_swab(tp, offset, &val))
10105                 return;
10106
10107         if ((val & 0xfc000000) == 0x0c000000) {
10108                 u32 ver_offset, addr;
10109                 int i;
10110
10111                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10112                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10113                         return;
10114
10115                 if (val != 0)
10116                         return;
10117
10118                 addr = offset + ver_offset - start;
10119                 for (i = 0; i < 16; i += 4) {
10120                         if (tg3_nvram_read(tp, addr + i, &val))
10121                                 return;
10122
10123                         val = cpu_to_le32(val);
10124                         memcpy(tp->fw_ver + i, &val, 4);
10125                 }
10126         }
10127 }
10128
10129 static int __devinit tg3_get_invariants(struct tg3 *tp)
10130 {
10131         static struct pci_device_id write_reorder_chipsets[] = {
10132                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10133                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10134                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10135                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10136                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10137                              PCI_DEVICE_ID_VIA_8385_0) },
10138                 { },
10139         };
10140         u32 misc_ctrl_reg;
10141         u32 cacheline_sz_reg;
10142         u32 pci_state_reg, grc_misc_cfg;
10143         u32 val;
10144         u16 pci_cmd;
10145         int err;
10146
10147         /* Force memory write invalidate off.  If we leave it on,
10148          * then on 5700_BX chips we have to enable a workaround.
10149          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10150          * to match the cacheline size.  The Broadcom driver has this
10151          * workaround but turns MWI off all the time, so it is never
10152          * used.  This seems to suggest that the workaround is insufficient.
10153          */
10154         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10155         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10156         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10157
10158         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10159          * has the register indirect write enable bit set before
10160          * we try to access any of the MMIO registers.  It is also
10161          * critical that the PCI-X hw workaround situation is decided
10162          * before that as well.
10163          */
10164         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10165                               &misc_ctrl_reg);
10166
10167         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10168                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10169
10170         /* Wrong chip ID in 5752 A0. This code can be removed later
10171          * as A0 is not in production.
10172          */
10173         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10174                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10175
10176         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10177          * we need to disable memory and use config. cycles
10178          * only to access all registers. The 5702/03 chips
10179          * can mistakenly decode the special cycles from the
10180          * ICH chipsets as memory write cycles, causing corruption
10181          * of register and memory space. Only certain ICH bridges
10182          * will drive special cycles with non-zero data during the
10183          * address phase which can fall within the 5703's address
10184          * range. This is not an ICH bug as the PCI spec allows
10185          * non-zero address during special cycles. However, only
10186          * these ICH bridges are known to drive non-zero addresses
10187          * during special cycles.
10188          *
10189          * Since special cycles do not cross PCI bridges, we only
10190          * enable this workaround if the 5703 is on the secondary
10191          * bus of these ICH bridges.
10192          */
10193         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10194             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10195                 static struct tg3_dev_id {
10196                         u32     vendor;
10197                         u32     device;
10198                         u32     rev;
10199                 } ich_chipsets[] = {
10200                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10201                           PCI_ANY_ID },
10202                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10203                           PCI_ANY_ID },
10204                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10205                           0xa },
10206                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10207                           PCI_ANY_ID },
10208                         { },
10209                 };
10210                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10211                 struct pci_dev *bridge = NULL;
10212
10213                 while (pci_id->vendor != 0) {
10214                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10215                                                 bridge);
10216                         if (!bridge) {
10217                                 pci_id++;
10218                                 continue;
10219                         }
10220                         if (pci_id->rev != PCI_ANY_ID) {
10221                                 u8 rev;
10222
10223                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10224                                                      &rev);
10225                                 if (rev > pci_id->rev)
10226                                         continue;
10227                         }
10228                         if (bridge->subordinate &&
10229                             (bridge->subordinate->number ==
10230                              tp->pdev->bus->number)) {
10231
10232                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10233                                 pci_dev_put(bridge);
10234                                 break;
10235                         }
10236                 }
10237         }
10238
10239         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10240          * DMA addresses > 40-bit. This bridge may have additional
10241          * 57xx devices behind it in some 4-port NIC designs for example.
10242          * Any tg3 device found behind the bridge will also need the 40-bit
10243          * DMA workaround.
10244          */
10245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10246             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10247                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10248                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10249                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10250         }
10251         else {
10252                 struct pci_dev *bridge = NULL;
10253
10254                 do {
10255                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10256                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10257                                                 bridge);
10258                         if (bridge && bridge->subordinate &&
10259                             (bridge->subordinate->number <=
10260                              tp->pdev->bus->number) &&
10261                             (bridge->subordinate->subordinate >=
10262                              tp->pdev->bus->number)) {
10263                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10264                                 pci_dev_put(bridge);
10265                                 break;
10266                         }
10267                 } while (bridge);
10268         }
10269
10270         /* Initialize misc host control in PCI block. */
10271         tp->misc_host_ctrl |= (misc_ctrl_reg &
10272                                MISC_HOST_CTRL_CHIPREV);
10273         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10274                                tp->misc_host_ctrl);
10275
10276         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10277                               &cacheline_sz_reg);
10278
10279         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10280         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10281         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10282         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10283
10284         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10285             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10286             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10288             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10289                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10290
10291         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10292             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10293                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10294
10295         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10296                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10297                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10298                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10299                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10300                 } else {
10301                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10302                                           TG3_FLG2_HW_TSO_1_BUG;
10303                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10304                                 ASIC_REV_5750 &&
10305                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10306                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10307                 }
10308         }
10309
10310         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10311             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10312             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10313             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10314             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10315                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10316
10317         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10318                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10319
10320         /* If we have an AMD 762 or VIA K8T800 chipset, write
10321          * reordering to the mailbox registers done by the host
10322          * controller can cause major troubles.  We read back from
10323          * controller can cause major problems.  We read back from
10324          * posted to the chip in order.
10325          */
10326         if (pci_dev_present(write_reorder_chipsets) &&
10327             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10328                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10329
10330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10331             tp->pci_lat_timer < 64) {
10332                 tp->pci_lat_timer = 64;
10333
10334                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10335                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10336                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10337                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10338
10339                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10340                                        cacheline_sz_reg);
10341         }
10342
10343         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10344                               &pci_state_reg);
10345
10346         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10347                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10348
10349                 /* If this is a 5700 BX chipset, and we are in PCI-X
10350                  * mode, enable register write workaround.
10351                  *
10352                  * The workaround is to use indirect register accesses
10353                  * for all chip writes not to mailbox registers.
10354                  */
10355                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10356                         u32 pm_reg;
10357                         u16 pci_cmd;
10358
10359                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10360
10361                         /* The chip can have its power management PCI config
10362                          * space registers clobbered due to this bug.
10363                          * So explicitly force the chip into D0 here.
10364                          */
10365                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10366                                               &pm_reg);
10367                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10368                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10369                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10370                                                pm_reg);
10371
10372                         /* Also, force SERR#/PERR# in PCI command. */
10373                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10374                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10375                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10376                 }
10377         }
10378
10379         /* 5700 BX chips need to have their TX producer index mailboxes
10380          * written twice to work around a bug.
10381          */
10382         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10383                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10384
10385         /* Back-to-back register writes can cause problems on this chip;
10386          * the workaround is to read back all reg writes except those to
10387          * mailbox regs.  See tg3_write_indirect_reg32().
10388          *
10389          * PCI Express 5750_A0 rev chips need this workaround too.
10390          */
10391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10392             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10393              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10394                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10395
10396         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10397                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10398         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10399                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10400
10401         /* Chip-specific fixup from Broadcom driver */
10402         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10403             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10404                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10405                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10406         }
10407
10408         /* Default fast path register access methods */
10409         tp->read32 = tg3_read32;
10410         tp->write32 = tg3_write32;
10411         tp->read32_mbox = tg3_read32;
10412         tp->write32_mbox = tg3_write32;
10413         tp->write32_tx_mbox = tg3_write32;
10414         tp->write32_rx_mbox = tg3_write32;
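              /* Register and mailbox accesses throughout the driver dispatch
               * through these function pointers, so the per-chip workarounds
               * selected below apply transparently to the rest of the code.
               */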
10415
10416         /* Various workaround register access methods */
10417         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10418                 tp->write32 = tg3_write_indirect_reg32;
10419         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10420                 tp->write32 = tg3_write_flush_reg32;
10421
10422         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10423             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10424                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10425                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10426                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10427         }
10428
10429         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10430                 tp->read32 = tg3_read_indirect_reg32;
10431                 tp->write32 = tg3_write_indirect_reg32;
10432                 tp->read32_mbox = tg3_read_indirect_mbox;
10433                 tp->write32_mbox = tg3_write_indirect_mbox;
10434                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10435                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10436
10437                 iounmap(tp->regs);
10438                 tp->regs = NULL;
10439
10440                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10441                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10442                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10443         }
10444
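              /* Chips that need indirect register writes, and 5700/5701 when
               * running in PCI-X mode, must also reach NIC SRAM through the
               * PCI config space memory window (see tg3_read_mem()).
               */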
10445         if (tp->write32 == tg3_write_indirect_reg32 ||
10446             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10447              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10448               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10449                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10450
10451         /* Get eeprom hw config before calling tg3_set_power_state().
10452          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10453          * determined before calling tg3_set_power_state() so that
10454          * we know whether or not to switch out of Vaux power.
10455          * When the flag is set, it means that GPIO1 is used for eeprom
10456          * write protect and also implies that it is a LOM where GPIOs
10457          * are not used to switch power.
10458          */
10459         tg3_get_eeprom_hw_cfg(tp);
10460
10461         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10462          * GPIO1 driven high will bring 5700's external PHY out of reset.
10463          * It is also used as eeprom write protect on LOMs.
10464          */
10465         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10466         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10467             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10468                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10469                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10470         /* Unused GPIO3 must be driven as output on 5752 because there
10471          * are no pull-up resistors on unused GPIO pins.
10472          */
10473         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10474                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10475
10476         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10477                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10478
10479         /* Force the chip into D0. */
10480         err = tg3_set_power_state(tp, PCI_D0);
10481         if (err) {
10482                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10483                        pci_name(tp->pdev));
10484                 return err;
10485         }
10486
10487         /* 5700 B0 chips do not support checksumming correctly due
10488          * to hardware bugs.
10489          */
10490         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10491                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10492
10493         /* Derive initial jumbo mode from MTU assigned in
10494          * ether_setup() via the alloc_etherdev() call
10495          */
10496         if (tp->dev->mtu > ETH_DATA_LEN &&
10497             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10498                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10499
10500         /* Determine WakeOnLan speed to use. */
10501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10502             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10503             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10504             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10505                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10506         } else {
10507                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10508         }
10509
10510         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
10511         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10512             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10513              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10514              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10515             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10516                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10517
10518         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10519             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10520                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10521         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10522                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10523
10524         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10525                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10526                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10527                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10528                 else
10529                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10530         }
10531
10532         tp->coalesce_mode = 0;
10533         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10534             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10535                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10536
10537         /* Initialize MAC MI mode, polling disabled. */
10538         tw32_f(MAC_MI_MODE, tp->mi_mode);
10539         udelay(80);
10540
10541         /* Initialize data/descriptor byte/word swapping. */
10542         val = tr32(GRC_MODE);
10543         val &= GRC_MODE_HOST_STACKUP;
10544         tw32(GRC_MODE, val | tp->grc_mode);
10545
10546         tg3_switch_clocks(tp);
10547
10548         /* Clear this out for sanity. */
10549         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10550
10551         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10552                               &pci_state_reg);
10553         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10554             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10555                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10556
10557                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10558                     chiprevid == CHIPREV_ID_5701_B0 ||
10559                     chiprevid == CHIPREV_ID_5701_B2 ||
10560                     chiprevid == CHIPREV_ID_5701_B5) {
10561                         void __iomem *sram_base;
10562
10563                         /* Write some dummy words into the SRAM status block
10564                          * area and see if it reads back correctly.  If the return
10565                          * value is bad, force-enable the PCI-X workaround.
10566                          */
10567                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10568
10569                         writel(0x00000000, sram_base);
10570                         writel(0x00000000, sram_base + 4);
10571                         writel(0xffffffff, sram_base + 4);
10572                         if (readl(sram_base) != 0x00000000)
10573                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10574                 }
10575         }
10576
10577         udelay(50);
10578         tg3_nvram_init(tp);
10579
10580         grc_misc_cfg = tr32(GRC_MISC_CFG);
10581         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10582
10583         /* Broadcom's driver says that CIOBE multisplit has a bug */
10584 #if 0
10585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10586             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10587                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10588                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10589         }
10590 #endif
10591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10592             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10593              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10594                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10595
10596         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10597             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10598                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10599         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10600                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10601                                       HOSTCC_MODE_CLRTICK_TXBD);
10602
10603                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10604                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10605                                        tp->misc_host_ctrl);
10606         }
10607
10608         /* these are limited to 10/100 only */
10609         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10610              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10611             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10612              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10613              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10614               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10615               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10616             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10617              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10618               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10619                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10620
10621         err = tg3_phy_probe(tp);
10622         if (err) {
10623                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10624                        pci_name(tp->pdev), err);
10625                 /* ... but do not return immediately ... */
10626         }
10627
10628         tg3_read_partno(tp);
10629         tg3_read_fw_ver(tp);
10630
10631         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10632                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10633         } else {
10634                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10635                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10636                 else
10637                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10638         }
10639
10640         /* 5700 {AX,BX} chips have a broken status block link
10641          * change bit implementation, so we must use the
10642          * status register in those cases.
10643          */
10644         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10645                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10646         else
10647                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10648
10649         /* The led_ctrl is set during tg3_phy_probe; here we might
10650          * have to force the link status polling mechanism based
10651          * upon subsystem IDs.
10652          */
10653         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10654             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10655                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10656                                   TG3_FLAG_USE_LINKCHG_REG);
10657         }
10658
10659         /* For all SERDES we poll the MAC status register. */
10660         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10661                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10662         else
10663                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10664
10665         /* All chips before 5787 can get confused if TX buffers
10666          * straddle the 4GB address boundary in some cases.
10667          */
10668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10669             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10670                 tp->dev->hard_start_xmit = tg3_start_xmit;
10671         else
10672                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10673
10674         tp->rx_offset = 2;
10675         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10676             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10677                 tp->rx_offset = 0;
10678
10679         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10680
10681         /* Increment the rx prod index on the rx std ring by at most
10682          * 8 for these chips to work around hw errata.
10683          */
10684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10687                 tp->rx_std_max_post = 8;
10688
10689         /* By default, disable wake-on-lan.  The user can change this
10690          * using ETHTOOL_SWOL.
10691          */
10692         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10693
10694         return err;
10695 }
10696
10697 #ifdef CONFIG_SPARC64
10698 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10699 {
10700         struct net_device *dev = tp->dev;
10701         struct pci_dev *pdev = tp->pdev;
10702         struct pcidev_cookie *pcp = pdev->sysdata;
10703
10704         if (pcp != NULL) {
10705                 unsigned char *addr;
10706                 int len;
10707
10708                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10709                                         &len);
10710                 if (addr && len == 6) {
10711                         memcpy(dev->dev_addr, addr, 6);
10712                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10713                         return 0;
10714                 }
10715         }
10716         return -ENODEV;
10717 }
10718
10719 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10720 {
10721         struct net_device *dev = tp->dev;
10722
10723         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10724         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10725         return 0;
10726 }
10727 #endif
10728
10729 static int __devinit tg3_get_device_address(struct tg3 *tp)
10730 {
10731         struct net_device *dev = tp->dev;
10732         u32 hi, lo, mac_offset;
10733         int addr_ok = 0;
10734
10735 #ifdef CONFIG_SPARC64
10736         if (!tg3_get_macaddr_sparc(tp))
10737                 return 0;
10738 #endif
10739
10740         mac_offset = 0x7c;
10741         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10742             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10743                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10744                         mac_offset = 0xcc;
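                      /* tg3_nvram_lock() returns non-zero on failure; in that
                       * case kick the NVRAM state machine with a reset instead
                       * of just dropping the lock again.
                       */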
10745                 if (tg3_nvram_lock(tp))
10746                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10747                 else
10748                         tg3_nvram_unlock(tp);
10749         }
10750
10751         /* First try to get it from MAC address mailbox. */
10752         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
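              /* 0x484b is ASCII "HK", apparently the bootcode's signature that
               * a MAC address has been written into the mailbox.
               */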
10753         if ((hi >> 16) == 0x484b) {
10754                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10755                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10756
10757                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10758                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10759                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10760                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10761                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10762
10763                 /* Some old bootcode may report a 0 MAC address in SRAM */
10764                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10765         }
10766         if (!addr_ok) {
10767                 /* Next, try NVRAM. */
10768                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10769                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10770                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10771                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10772                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10773                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10774                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10775                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10776                 }
10777                 /* Finally just fetch it out of the MAC control regs. */
10778                 else {
10779                         hi = tr32(MAC_ADDR_0_HIGH);
10780                         lo = tr32(MAC_ADDR_0_LOW);
10781
10782                         dev->dev_addr[5] = lo & 0xff;
10783                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10784                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10785                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10786                         dev->dev_addr[1] = hi & 0xff;
10787                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10788                 }
10789         }
10790
10791         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10792 #ifdef CONFIG_SPARC64
10793                 if (!tg3_get_default_macaddr_sparc(tp))
10794                         return 0;
10795 #endif
10796                 return -EINVAL;
10797         }
10798         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10799         return 0;
10800 }
10801
10802 #define BOUNDARY_SINGLE_CACHELINE       1
10803 #define BOUNDARY_MULTI_CACHELINE        2
10804
10805 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10806 {
10807         int cacheline_size;
10808         u8 byte;
10809         int goal;
10810
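              /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
               * multiply by four below; zero presumably means firmware never
               * programmed it, so fall back to the largest boundary.
               */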
10811         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10812         if (byte == 0)
10813                 cacheline_size = 1024;
10814         else
10815                 cacheline_size = (int) byte * 4;
10816
10817         /* On 5703 and later chips, the boundary bits have no
10818          * effect.
10819          */
10820         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10821             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10822             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10823                 goto out;
10824
10825 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10826         goal = BOUNDARY_MULTI_CACHELINE;
10827 #else
10828 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10829         goal = BOUNDARY_SINGLE_CACHELINE;
10830 #else
10831         goal = 0;
10832 #endif
10833 #endif
10834
10835         if (!goal)
10836                 goto out;
10837
10838         /* PCI controllers on most RISC systems tend to disconnect
10839          * when a device tries to burst across a cache-line boundary.
10840          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10841          *
10842          * Unfortunately, for PCI-E there are only limited
10843          * write-side controls for this, and thus for reads
10844          * we will still get the disconnects.  We'll also waste
10845          * these PCI cycles for both read and write for chips
10846          * other than 5700 and 5701 which do not implement the
10847          * boundary bits.
10848          */
10849         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10850             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10851                 switch (cacheline_size) {
10852                 case 16:
10853                 case 32:
10854                 case 64:
10855                 case 128:
10856                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10857                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10858                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10859                         } else {
10860                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10861                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10862                         }
10863                         break;
10864
10865                 case 256:
10866                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10867                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10868                         break;
10869
10870                 default:
10871                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10872                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10873                         break;
10874                 };
10875         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10876                 switch (cacheline_size) {
10877                 case 16:
10878                 case 32:
10879                 case 64:
10880                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10881                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10882                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10883                                 break;
10884                         }
10885                         /* fallthrough */
10886                 case 128:
10887                 default:
10888                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10889                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10890                         break;
10891                 };
10892         } else {
10893                 switch (cacheline_size) {
10894                 case 16:
10895                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10896                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10897                                         DMA_RWCTRL_WRITE_BNDRY_16);
10898                                 break;
10899                         }
10900                         /* fallthrough */
10901                 case 32:
10902                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10903                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10904                                         DMA_RWCTRL_WRITE_BNDRY_32);
10905                                 break;
10906                         }
10907                         /* fallthrough */
10908                 case 64:
10909                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10910                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10911                                         DMA_RWCTRL_WRITE_BNDRY_64);
10912                                 break;
10913                         }
10914                         /* fallthrough */
10915                 case 128:
10916                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10917                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10918                                         DMA_RWCTRL_WRITE_BNDRY_128);
10919                                 break;
10920                         }
10921                         /* fallthrough */
10922                 case 256:
10923                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10924                                 DMA_RWCTRL_WRITE_BNDRY_256);
10925                         break;
10926                 case 512:
10927                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10928                                 DMA_RWCTRL_WRITE_BNDRY_512);
10929                         break;
10930                 case 1024:
10931                 default:
10932                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10933                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10934                         break;
10935                 };
10936         }
10937
10938 out:
10939         return val;
10940 }
10941
10942 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10943 {
10944         struct tg3_internal_buffer_desc test_desc;
10945         u32 sram_dma_descs;
10946         int i, ret;
10947
10948         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10949
10950         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10951         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10952         tw32(RDMAC_STATUS, 0);
10953         tw32(WDMAC_STATUS, 0);
10954
10955         tw32(BUFMGR_MODE, 0);
10956         tw32(FTQ_RESET, 0);
10957
10958         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10959         test_desc.addr_lo = buf_dma & 0xffffffff;
10960         test_desc.nic_mbuf = 0x00002100;
10961         test_desc.len = size;
10962
10963         /*
10964          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10965          * the *second* time the tg3 driver was loaded after an
10966          * initial scan.
10967          *
10968          * Broadcom tells me:
10969          *   ...the DMA engine is connected to the GRC block and a DMA
10970          *   reset may affect the GRC block in some unpredictable way...
10971          *   The behavior of resets to individual blocks has not been tested.
10972          *
10973          * Broadcom noted the GRC reset will also reset all sub-components.
10974          */
10975         if (to_device) {
10976                 test_desc.cqid_sqid = (13 << 8) | 2;
10977
10978                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10979                 udelay(40);
10980         } else {
10981                 test_desc.cqid_sqid = (16 << 8) | 7;
10982
10983                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10984                 udelay(40);
10985         }
10986         test_desc.flags = 0x00000005;
10987
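              /* Copy the test descriptor into NIC SRAM one 32-bit word at a
               * time through the PCI config space memory window.
               */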
10988         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10989                 u32 val;
10990
10991                 val = *(((u32 *)&test_desc) + i);
10992                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10993                                        sram_dma_descs + (i * sizeof(u32)));
10994                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10995         }
10996         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10997
10998         if (to_device) {
10999                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11000         } else {
11001                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11002         }
11003
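              /* Poll the completion FIFO for up to ~4ms (40 x 100us); the DMA
               * is considered done once the descriptor address shows up there.
               */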
11004         ret = -ENODEV;
11005         for (i = 0; i < 40; i++) {
11006                 u32 val;
11007
11008                 if (to_device)
11009                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11010                 else
11011                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11012                 if ((val & 0xffff) == sram_dma_descs) {
11013                         ret = 0;
11014                         break;
11015                 }
11016
11017                 udelay(100);
11018         }
11019
11020         return ret;
11021 }
11022
11023 #define TEST_BUFFER_SIZE        0x2000
11024
11025 static int __devinit tg3_test_dma(struct tg3 *tp)
11026 {
11027         dma_addr_t buf_dma;
11028         u32 *buf, saved_dma_rwctrl;
11029         int ret;
11030
11031         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11032         if (!buf) {
11033                 ret = -ENOMEM;
11034                 goto out_nofree;
11035         }
11036
11037         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11038                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11039
11040         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11041
11042         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11043                 /* DMA read watermark not used on PCIE */
11044                 tp->dma_rwctrl |= 0x00180000;
11045         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11046                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11047                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11048                         tp->dma_rwctrl |= 0x003f0000;
11049                 else
11050                         tp->dma_rwctrl |= 0x003f000f;
11051         } else {
11052                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11053                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11054                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11055
11056                         /* If the 5704 is behind the EPB bridge, we can
11057                          * do the less restrictive ONE_DMA workaround for
11058                          * better performance.
11059                          */
11060                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11061                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11062                                 tp->dma_rwctrl |= 0x8000;
11063                         else if (ccval == 0x6 || ccval == 0x7)
11064                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11065
11066                         /* Set bit 23 to enable PCIX hw bug fix */
11067                         tp->dma_rwctrl |= 0x009f0000;
11068                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11069                         /* 5780 always in PCIX mode */
11070                         tp->dma_rwctrl |= 0x00144000;
11071                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11072                         /* 5714 always in PCIX mode */
11073                         tp->dma_rwctrl |= 0x00148000;
11074                 } else {
11075                         tp->dma_rwctrl |= 0x001b000f;
11076                 }
11077         }
11078
11079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11080             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11081                 tp->dma_rwctrl &= 0xfffffff0;
11082
11083         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11085                 /* Remove this if it causes problems for some boards. */
11086                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11087
11088                 /* On 5700/5701 chips, we need to set this bit.
11089                  * Otherwise the chip will issue cacheline transactions
11090                  * to streamable DMA memory without all of the byte
11091                  * enables turned on.  This is an error on several
11092                  * RISC PCI controllers, in particular sparc64.
11093                  *
11094                  * On 5703/5704 chips, this bit has been reassigned
11095                  * a different meaning.  In particular, it is used
11096                  * on those chips to enable a PCI-X workaround.
11097                  */
11098                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11099         }
11100
11101         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11102
11103 #if 0
11104         /* Unneeded, already done by tg3_get_invariants.  */
11105         tg3_switch_clocks(tp);
11106 #endif
11107
11108         ret = 0;
11109         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11110             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11111                 goto out;
11112
11113         /* It is best to perform the DMA test with the maximum write burst size
11114          * to expose the 5700/5701 write DMA bug.
11115          */
11116         saved_dma_rwctrl = tp->dma_rwctrl;
11117         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11118         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11119
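              /* Fill the buffer with a counting pattern, DMA it to the chip and
               * back, then verify it.  On corruption, drop to the conservative
               * 16-byte write boundary and retry before giving up.
               */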
11120         while (1) {
11121                 u32 *p = buf, i;
11122
11123                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11124                         p[i] = i;
11125
11126                 /* Send the buffer to the chip. */
11127                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11128                 if (ret) {
11129                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11130                         break;
11131                 }
11132
11133 #if 0
11134                 /* validate data reached card RAM correctly. */
11135                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11136                         u32 val;
11137                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11138                         if (le32_to_cpu(val) != p[i]) {
11139                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11140                                 /* ret = -ENODEV here? */
11141                         }
11142                         p[i] = 0;
11143                 }
11144 #endif
11145                 /* Now read it back. */
11146                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11147                 if (ret) {
11148                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11149
11150                         break;
11151                 }
11152
11153                 /* Verify it. */
11154                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11155                         if (p[i] == i)
11156                                 continue;
11157
11158                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11159                             DMA_RWCTRL_WRITE_BNDRY_16) {
11160                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11161                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11162                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11163                                 break;
11164                         } else {
11165                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11166                                 ret = -ENODEV;
11167                                 goto out;
11168                         }
11169                 }
11170
11171                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11172                         /* Success. */
11173                         ret = 0;
11174                         break;
11175                 }
11176         }
11177         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11178             DMA_RWCTRL_WRITE_BNDRY_16) {
11179                 static struct pci_device_id dma_wait_state_chipsets[] = {
11180                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11181                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11182                         { },
11183                 };
11184
11185                 /* DMA test passed without adjusting the DMA boundary;
11186                  * now look for chipsets that are known to expose the
11187                  * DMA bug without failing the test.
11188                  */
11189                 if (pci_dev_present(dma_wait_state_chipsets)) {
11190                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11191                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11192                 }
11193                 else
11194                         /* Safe to use the calculated DMA boundary. */
11195                         tp->dma_rwctrl = saved_dma_rwctrl;
11196
11197                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11198         }
11199
11200 out:
11201         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11202 out_nofree:
11203         return ret;
11204 }
11205
11206 static void __devinit tg3_init_link_config(struct tg3 *tp)
11207 {
11208         tp->link_config.advertising =
11209                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11210                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11211                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11212                  ADVERTISED_Autoneg | ADVERTISED_MII);
11213         tp->link_config.speed = SPEED_INVALID;
11214         tp->link_config.duplex = DUPLEX_INVALID;
11215         tp->link_config.autoneg = AUTONEG_ENABLE;
11216         tp->link_config.active_speed = SPEED_INVALID;
11217         tp->link_config.active_duplex = DUPLEX_INVALID;
11218         tp->link_config.phy_is_low_power = 0;
11219         tp->link_config.orig_speed = SPEED_INVALID;
11220         tp->link_config.orig_duplex = DUPLEX_INVALID;
11221         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11222 }
11223
11224 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11225 {
11226         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11227                 tp->bufmgr_config.mbuf_read_dma_low_water =
11228                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11229                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11230                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11231                 tp->bufmgr_config.mbuf_high_water =
11232                         DEFAULT_MB_HIGH_WATER_5705;
11233
11234                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11235                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11236                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11237                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11238                 tp->bufmgr_config.mbuf_high_water_jumbo =
11239                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11240         } else {
11241                 tp->bufmgr_config.mbuf_read_dma_low_water =
11242                         DEFAULT_MB_RDMA_LOW_WATER;
11243                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11244                         DEFAULT_MB_MACRX_LOW_WATER;
11245                 tp->bufmgr_config.mbuf_high_water =
11246                         DEFAULT_MB_HIGH_WATER;
11247
11248                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11249                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11250                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11251                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11252                 tp->bufmgr_config.mbuf_high_water_jumbo =
11253                         DEFAULT_MB_HIGH_WATER_JUMBO;
11254         }
11255
11256         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11257         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11258 }
11259
11260 static char * __devinit tg3_phy_string(struct tg3 *tp)
11261 {
11262         switch (tp->phy_id & PHY_ID_MASK) {
11263         case PHY_ID_BCM5400:    return "5400";
11264         case PHY_ID_BCM5401:    return "5401";
11265         case PHY_ID_BCM5411:    return "5411";
11266         case PHY_ID_BCM5701:    return "5701";
11267         case PHY_ID_BCM5703:    return "5703";
11268         case PHY_ID_BCM5704:    return "5704";
11269         case PHY_ID_BCM5705:    return "5705";
11270         case PHY_ID_BCM5750:    return "5750";
11271         case PHY_ID_BCM5752:    return "5752";
11272         case PHY_ID_BCM5714:    return "5714";
11273         case PHY_ID_BCM5780:    return "5780";
11274         case PHY_ID_BCM5755:    return "5755";
11275         case PHY_ID_BCM5787:    return "5787";
11276         case PHY_ID_BCM8002:    return "8002/serdes";
11277         case 0:                 return "serdes";
11278         default:                return "unknown";
11279         };
11280 }
11281
11282 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11283 {
11284         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11285                 strcpy(str, "PCI Express");
11286                 return str;
11287         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11288                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11289
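                      /* The low five bits of CLOCK_CTRL encode the PCI-X bus
                       * speed, decoded into a string below.
                       */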
11290                 strcpy(str, "PCIX:");
11291
11292                 if ((clock_ctrl == 7) ||
11293                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11294                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11295                         strcat(str, "133MHz");
11296                 else if (clock_ctrl == 0)
11297                         strcat(str, "33MHz");
11298                 else if (clock_ctrl == 2)
11299                         strcat(str, "50MHz");
11300                 else if (clock_ctrl == 4)
11301                         strcat(str, "66MHz");
11302                 else if (clock_ctrl == 6)
11303                         strcat(str, "100MHz");
11304         } else {
11305                 strcpy(str, "PCI:");
11306                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11307                         strcat(str, "66MHz");
11308                 else
11309                         strcat(str, "33MHz");
11310         }
11311         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11312                 strcat(str, ":32-bit");
11313         else
11314                 strcat(str, ":64-bit");
11315         return str;
11316 }
11317
11318 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11319 {
11320         struct pci_dev *peer;
11321         unsigned int func, devnr = tp->pdev->devfn & ~7;
11322
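              /* Masking off the low three bits of devfn clears the function
               * number, so the loop walks all eight functions in this slot
               * looking for the other port of a dual-port device.
               */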
11323         for (func = 0; func < 8; func++) {
11324                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11325                 if (peer && peer != tp->pdev)
11326                         break;
11327                 pci_dev_put(peer);
11328         }
11329         /* 5704 can be configured in single-port mode; set peer to
11330          * tp->pdev in that case.
11331          */
11332         if (!peer) {
11333                 peer = tp->pdev;
11334                 return peer;
11335         }
11336
11337         /*
11338          * We don't need to keep the refcount elevated; there's no way
11339          * to remove one half of this device without removing the other.
11340          */
11341         pci_dev_put(peer);
11342
11343         return peer;
11344 }
11345
11346 static void __devinit tg3_init_coal(struct tg3 *tp)
11347 {
11348         struct ethtool_coalesce *ec = &tp->coal;
11349
11350         memset(ec, 0, sizeof(*ec));
11351         ec->cmd = ETHTOOL_GCOALESCE;
11352         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11353         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11354         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11355         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11356         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11357         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11358         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11359         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11360         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11361
11362         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11363                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11364                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11365                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11366                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11367                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11368         }
11369
11370         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11371                 ec->rx_coalesce_usecs_irq = 0;
11372                 ec->tx_coalesce_usecs_irq = 0;
11373                 ec->stats_block_coalesce_usecs = 0;
11374         }
11375 }
11376
11377 static int __devinit tg3_init_one(struct pci_dev *pdev,
11378                                   const struct pci_device_id *ent)
11379 {
11380         static int tg3_version_printed = 0;
11381         unsigned long tg3reg_base, tg3reg_len;
11382         struct net_device *dev;
11383         struct tg3 *tp;
11384         int i, err, pm_cap;
11385         char str[40];
11386         u64 dma_mask, persist_dma_mask;
11387
11388         if (tg3_version_printed++ == 0)
11389                 printk(KERN_INFO "%s", version);
11390
11391         err = pci_enable_device(pdev);
11392         if (err) {
11393                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11394                        "aborting.\n");
11395                 return err;
11396         }
11397
11398         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11399                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11400                        "base address, aborting.\n");
11401                 err = -ENODEV;
11402                 goto err_out_disable_pdev;
11403         }
11404
11405         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11406         if (err) {
11407                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11408                        "aborting.\n");
11409                 goto err_out_disable_pdev;
11410         }
11411
11412         pci_set_master(pdev);
11413
11414         /* Find power-management capability. */
11415         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11416         if (pm_cap == 0) {
11417                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11418                        "aborting.\n");
11419                 err = -EIO;
11420                 goto err_out_free_res;
11421         }
11422
11423         tg3reg_base = pci_resource_start(pdev, 0);
11424         tg3reg_len = pci_resource_len(pdev, 0);
11425
11426         dev = alloc_etherdev(sizeof(*tp));
11427         if (!dev) {
11428                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11429                 err = -ENOMEM;
11430                 goto err_out_free_res;
11431         }
11432
11433         SET_MODULE_OWNER(dev);
11434         SET_NETDEV_DEV(dev, &pdev->dev);
11435
11436 #if TG3_VLAN_TAG_USED
11437         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11438         dev->vlan_rx_register = tg3_vlan_rx_register;
11439         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11440 #endif
11441
11442         tp = netdev_priv(dev);
11443         tp->pdev = pdev;
11444         tp->dev = dev;
11445         tp->pm_cap = pm_cap;
11446         tp->mac_mode = TG3_DEF_MAC_MODE;
11447         tp->rx_mode = TG3_DEF_RX_MODE;
11448         tp->tx_mode = TG3_DEF_TX_MODE;
11449         tp->mi_mode = MAC_MI_MODE_BASE;
11450         if (tg3_debug > 0)
11451                 tp->msg_enable = tg3_debug;
11452         else
11453                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11454
11455         /* The word/byte swap controls here control register access byte
11456          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11457          * setting below.
11458          */
11459         tp->misc_host_ctrl =
11460                 MISC_HOST_CTRL_MASK_PCI_INT |
11461                 MISC_HOST_CTRL_WORD_SWAP |
11462                 MISC_HOST_CTRL_INDIR_ACCESS |
11463                 MISC_HOST_CTRL_PCISTATE_RW;
11464
11465         /* The NONFRM (non-frame) byte/word swap controls take effect
11466          * on descriptor entries, anything which isn't packet data.
11467          *
11468          * The StrongARM chips on the board (one for tx, one for rx)
11469          * are running in big-endian mode.
11470          */
11471         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11472                         GRC_MODE_WSWAP_NONFRM_DATA);
11473 #ifdef __BIG_ENDIAN
11474         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11475 #endif
11476         spin_lock_init(&tp->lock);
11477         spin_lock_init(&tp->indirect_lock);
11478         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11479
11480         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11481         if (tp->regs == 0UL) {
11482                 printk(KERN_ERR PFX "Cannot map device registers, "
11483                        "aborting.\n");
11484                 err = -ENOMEM;
11485                 goto err_out_free_dev;
11486         }
11487
11488         tg3_init_link_config(tp);
11489
11490         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11491         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11492         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11493
11494         dev->open = tg3_open;
11495         dev->stop = tg3_close;
11496         dev->get_stats = tg3_get_stats;
11497         dev->set_multicast_list = tg3_set_rx_mode;
11498         dev->set_mac_address = tg3_set_mac_addr;
11499         dev->do_ioctl = tg3_ioctl;
11500         dev->tx_timeout = tg3_tx_timeout;
11501         dev->poll = tg3_poll;
11502         dev->ethtool_ops = &tg3_ethtool_ops;
11503         dev->weight = 64;
11504         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11505         dev->change_mtu = tg3_change_mtu;
11506         dev->irq = pdev->irq;
11507 #ifdef CONFIG_NET_POLL_CONTROLLER
11508         dev->poll_controller = tg3_poll_controller;
11509 #endif
11510
11511         err = tg3_get_invariants(tp);
11512         if (err) {
11513                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11514                        "aborting.\n");
11515                 goto err_out_iounmap;
11516         }
11517
11518         /* The EPB bridge inside 5714, 5715, and 5780 and any
11519          * device behind the EPB cannot support DMA addresses > 40-bit.
11520          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11521          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11522          * do DMA address check in tg3_start_xmit().
11523          */
11524         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11525                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11526         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11527                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11528 #ifdef CONFIG_HIGHMEM
11529                 dma_mask = DMA_64BIT_MASK;
11530 #endif
11531         } else
11532                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11533
11534         /* Configure DMA attributes. */
11535         if (dma_mask > DMA_32BIT_MASK) {
11536                 err = pci_set_dma_mask(pdev, dma_mask);
11537                 if (!err) {
11538                         dev->features |= NETIF_F_HIGHDMA;
11539                         err = pci_set_consistent_dma_mask(pdev,
11540                                                           persist_dma_mask);
11541                         if (err < 0) {
11542                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11543                                        "DMA for consistent allocations\n");
11544                                 goto err_out_iounmap;
11545                         }
11546                 }
11547         }
11548         if (err || dma_mask == DMA_32BIT_MASK) {
11549                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11550                 if (err) {
11551                         printk(KERN_ERR PFX "No usable DMA configuration, "
11552                                "aborting.\n");
11553                         goto err_out_iounmap;
11554                 }
11555         }
11556
11557         tg3_init_bufmgr_config(tp);
11558
11559 #if TG3_TSO_SUPPORT != 0
11560         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11561                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11562         }
11563         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11565             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11566             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11567                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11568         } else {
11569                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11570         }
11571
11572         /* TSO is on by default on chips that support hardware TSO.
11573          * Firmware TSO on older chips gives lower performance, so it
11574          * is off by default, but can be enabled using ethtool.
11575          */
11576         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11577                 dev->features |= NETIF_F_TSO;
11578                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11579                         dev->features |= NETIF_F_TSO6;
11580         }
11581
11582 #endif
11583
11584         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11585             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11586             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11587                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11588                 tp->rx_pending = 63;
11589         }
11590
11591         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11592             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11593                 tp->pdev_peer = tg3_find_peer(tp);
11594
11595         err = tg3_get_device_address(tp);
11596         if (err) {
11597                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11598                        "aborting.\n");
11599                 goto err_out_iounmap;
11600         }
11601
11602         /*
11603          * Reset the chip in case the UNDI or EFI driver did not shut down
11604          * DMA.  The self test will enable WDMAC and we'll see (spurious)
11605          * pending DMA on the PCI bus at that point.
11606          */
11607         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11608             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11609                 pci_save_state(tp->pdev);
11610                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11611                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11612         }
11613
11614         err = tg3_test_dma(tp);
11615         if (err) {
11616                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11617                 goto err_out_iounmap;
11618         }
11619
11620         /* Tigon3 can only checksum IPv4... and some chips have buggy
11621          * checksumming.
11622          */
11623         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11624                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11625                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11626                         dev->features |= NETIF_F_HW_CSUM;
11627                 else
11628                         dev->features |= NETIF_F_IP_CSUM;
11629                 dev->features |= NETIF_F_SG;
11630                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11631         } else
11632                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11633
11634         /* flow control autonegotiation is default behavior */
11635         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11636
11637         tg3_init_coal(tp);
11638
11639         /* Now that we have fully set up the chip, save away a snapshot
11640          * of the PCI config space.  We need to restore this after
11641          * GRC_MISC_CFG core clock resets and some resume events.
11642          */
11643         pci_save_state(tp->pdev);
11644
11645         err = register_netdev(dev);
11646         if (err) {
11647                 printk(KERN_ERR PFX "Cannot register net device, "
11648                        "aborting.\n");
11649                 goto err_out_iounmap;
11650         }
11651
11652         pci_set_drvdata(pdev, dev);
11653
11654         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11655                dev->name,
11656                tp->board_part_number,
11657                tp->pci_chip_rev_id,
11658                tg3_phy_string(tp),
11659                tg3_bus_string(tp, str),
11660                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11661
11662         for (i = 0; i < 6; i++)
11663                 printk("%2.2x%c", dev->dev_addr[i],
11664                        i == 5 ? '\n' : ':');
11665
11666         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11667                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11668                "TSOcap[%d] \n",
11669                dev->name,
11670                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11671                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11672                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11673                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11674                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11675                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11676                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11677         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11678                dev->name, tp->dma_rwctrl,
11679                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11680                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11681
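              /* No link has been negotiated yet; start with carrier off. */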
11682         netif_carrier_off(tp->dev);
11683
11684         return 0;
11685
11686 err_out_iounmap:
11687         if (tp->regs) {
11688                 iounmap(tp->regs);
11689                 tp->regs = NULL;
11690         }
11691
11692 err_out_free_dev:
11693         free_netdev(dev);
11694
11695 err_out_free_res:
11696         pci_release_regions(pdev);
11697
11698 err_out_disable_pdev:
11699         pci_disable_device(pdev);
11700         pci_set_drvdata(pdev, NULL);
11701         return err;
11702 }
11703
11704 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11705 {
11706         struct net_device *dev = pci_get_drvdata(pdev);
11707
11708         if (dev) {
11709                 struct tg3 *tp = netdev_priv(dev);
11710
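                      /* Make sure any deferred work (such as the reset
                       * task) has finished before the device is
                       * unregistered and freed.
                       */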
11711                 flush_scheduled_work();
11712                 unregister_netdev(dev);
11713                 if (tp->regs) {
11714                         iounmap(tp->regs);
11715                         tp->regs = NULL;
11716                 }
11717                 free_netdev(dev);
11718                 pci_release_regions(pdev);
11719                 pci_disable_device(pdev);
11720                 pci_set_drvdata(pdev, NULL);
11721         }
11722 }
11723
11724 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11725 {
11726         struct net_device *dev = pci_get_drvdata(pdev);
11727         struct tg3 *tp = netdev_priv(dev);
11728         int err;
11729
11730         if (!netif_running(dev))
11731                 return 0;
11732
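              /* Quiesce the device: finish pending deferred work, stop the
               * RX/TX paths and the watchdog timer, and halt the chip with
               * interrupts disabled.
               */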
11733         flush_scheduled_work();
11734         tg3_netif_stop(tp);
11735
11736         del_timer_sync(&tp->timer);
11737
11738         tg3_full_lock(tp, 1);
11739         tg3_disable_ints(tp);
11740         tg3_full_unlock(tp);
11741
11742         netif_device_detach(dev);
11743
11744         tg3_full_lock(tp, 0);
11745         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11746         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11747         tg3_full_unlock(tp);
11748
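              /* Enter the requested low-power state.  If that fails, bring
               * the hardware back up and keep running rather than leaving
               * the device half suspended.
               */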
11749         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11750         if (err) {
11751                 tg3_full_lock(tp, 0);
11752
11753                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11754                 if (tg3_restart_hw(tp, 1))
11755                         goto out;
11756
11757                 tp->timer.expires = jiffies + tp->timer_offset;
11758                 add_timer(&tp->timer);
11759
11760                 netif_device_attach(dev);
11761                 tg3_netif_start(tp);
11762
11763 out:
11764                 tg3_full_unlock(tp);
11765         }
11766
11767         return err;
11768 }
11769
11770 static int tg3_resume(struct pci_dev *pdev)
11771 {
11772         struct net_device *dev = pci_get_drvdata(pdev);
11773         struct tg3 *tp = netdev_priv(dev);
11774         int err;
11775
11776         if (!netif_running(dev))
11777                 return 0;
11778
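              /* Restore the PCI config space snapshot saved at probe time,
               * return the chip to full power and reprogram it from
               * scratch.
               */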
11779         pci_restore_state(tp->pdev);
11780
11781         err = tg3_set_power_state(tp, PCI_D0);
11782         if (err)
11783                 return err;
11784
11785         netif_device_attach(dev);
11786
11787         tg3_full_lock(tp, 0);
11788
11789         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11790         err = tg3_restart_hw(tp, 1);
11791         if (err)
11792                 goto out;
11793
11794         tp->timer.expires = jiffies + tp->timer_offset;
11795         add_timer(&tp->timer);
11796
11797         tg3_netif_start(tp);
11798
11799 out:
11800         tg3_full_unlock(tp);
11801
11802         return err;
11803 }
11804
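      /* PCI driver glue: probe, remove and power-management entry points
       * dispatched by the PCI core.
       */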
11805 static struct pci_driver tg3_driver = {
11806         .name           = DRV_MODULE_NAME,
11807         .id_table       = tg3_pci_tbl,
11808         .probe          = tg3_init_one,
11809         .remove         = __devexit_p(tg3_remove_one),
11810         .suspend        = tg3_suspend,
11811         .resume         = tg3_resume
11812 };
11813
11814 static int __init tg3_init(void)
11815 {
11816         return pci_register_driver(&tg3_driver);
11817 }
11818
11819 static void __exit tg3_cleanup(void)
11820 {
11821         pci_unregister_driver(&tg3_driver);
11822 }
11823
11824 module_init(tg3_init);
11825 module_exit(tg3_cleanup);