1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.60"
73 #define DRV_MODULE_RELDATE      "June 17, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
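/* Editor's sketch (illustrative, not used by the driver): because
 * TG3_TX_RING_SIZE is a power of two, the '& (TG3_TX_RING_SIZE - 1)' in
 * NEXT_TX() and TX_BUFFS_AVAIL() is equivalent to '% TG3_TX_RING_SIZE',
 * so the producer and consumer indices can be free-running counters.
 * A hypothetical occupancy helper showing the same arithmetic:
 *
 *	static inline u32 tx_ring_in_use(u32 prod, u32 cons)
 *	{
 *		return (prod - cons) & (TG3_TX_RING_SIZE - 1);
 *	}
 *
 * TX_BUFFS_AVAIL() is tx_pending minus this in-use count, and the TX
 * queue is woken again once the free count exceeds TG3_TX_WAKEUP_THRESH.
 */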
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
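/* Editor's note (usage sketch; the bit values are assumed from the
 * standard NETIF_MSG_* enum, they are not defined in this file):
 * tg3_debug is a bitmask of NETIF_MSG_* flags, so for example
 *
 *	modprobe tg3 tg3_debug=0x0007
 *
 * would enable only the DRV, PROBE and LINK messages, while the default
 * of -1 selects the TG3_DEF_MSG_ENABLE mask defined above.
 */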
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
263           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
265           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
266         { 0, }
267 };
268
269 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
270
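/* Editor's note: every entry above matches on PCI_ANY_ID for the
 * subsystem IDs, so each row could equivalently be written with the
 * PCI_DEVICE() helper from <linux/pci.h>, e.g. (illustrative only, not a
 * change to the table):
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700) },
 *
 * MODULE_DEVICE_TABLE() exports the table so that hotplug tooling can
 * load tg3 automatically when a matching device is found.
 */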
271 static struct {
272         const char string[ETH_GSTRING_LEN];
273 } ethtool_stats_keys[TG3_NUM_STATS] = {
274         { "rx_octets" },
275         { "rx_fragments" },
276         { "rx_ucast_packets" },
277         { "rx_mcast_packets" },
278         { "rx_bcast_packets" },
279         { "rx_fcs_errors" },
280         { "rx_align_errors" },
281         { "rx_xon_pause_rcvd" },
282         { "rx_xoff_pause_rcvd" },
283         { "rx_mac_ctrl_rcvd" },
284         { "rx_xoff_entered" },
285         { "rx_frame_too_long_errors" },
286         { "rx_jabbers" },
287         { "rx_undersize_packets" },
288         { "rx_in_length_errors" },
289         { "rx_out_length_errors" },
290         { "rx_64_or_less_octet_packets" },
291         { "rx_65_to_127_octet_packets" },
292         { "rx_128_to_255_octet_packets" },
293         { "rx_256_to_511_octet_packets" },
294         { "rx_512_to_1023_octet_packets" },
295         { "rx_1024_to_1522_octet_packets" },
296         { "rx_1523_to_2047_octet_packets" },
297         { "rx_2048_to_4095_octet_packets" },
298         { "rx_4096_to_8191_octet_packets" },
299         { "rx_8192_to_9022_octet_packets" },
300
301         { "tx_octets" },
302         { "tx_collisions" },
303
304         { "tx_xon_sent" },
305         { "tx_xoff_sent" },
306         { "tx_flow_control" },
307         { "tx_mac_errors" },
308         { "tx_single_collisions" },
309         { "tx_mult_collisions" },
310         { "tx_deferred" },
311         { "tx_excessive_collisions" },
312         { "tx_late_collisions" },
313         { "tx_collide_2times" },
314         { "tx_collide_3times" },
315         { "tx_collide_4times" },
316         { "tx_collide_5times" },
317         { "tx_collide_6times" },
318         { "tx_collide_7times" },
319         { "tx_collide_8times" },
320         { "tx_collide_9times" },
321         { "tx_collide_10times" },
322         { "tx_collide_11times" },
323         { "tx_collide_12times" },
324         { "tx_collide_13times" },
325         { "tx_collide_14times" },
326         { "tx_collide_15times" },
327         { "tx_ucast_packets" },
328         { "tx_mcast_packets" },
329         { "tx_bcast_packets" },
330         { "tx_carrier_sense_errors" },
331         { "tx_discards" },
332         { "tx_errors" },
333
334         { "dma_writeq_full" },
335         { "dma_write_prioq_full" },
336         { "rxbds_empty" },
337         { "rx_discards" },
338         { "rx_errors" },
339         { "rx_threshold_hit" },
340
341         { "dma_readq_full" },
342         { "dma_read_prioq_full" },
343         { "tx_comp_queue_full" },
344
345         { "ring_set_send_prod_index" },
346         { "ring_status_update" },
347         { "nic_irqs" },
348         { "nic_avoided_irqs" },
349         { "nic_tx_threshold_hit" }
350 };
351
352 static struct {
353         const char string[ETH_GSTRING_LEN];
354 } ethtool_test_keys[TG3_NUM_TEST] = {
355         { "nvram test     (online) " },
356         { "link test      (online) " },
357         { "register test  (offline)" },
358         { "memory test    (offline)" },
359         { "loopback test  (offline)" },
360         { "interrupt test (offline)" },
361 };
362
363 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
364 {
365         writel(val, tp->regs + off);
366 }
367
368 static u32 tg3_read32(struct tg3 *tp, u32 off)
369 {
370         return readl(tp->regs + off);
371 }
372
373 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
374 {
375         unsigned long flags;
376
377         spin_lock_irqsave(&tp->indirect_lock, flags);
378         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
379         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
380         spin_unlock_irqrestore(&tp->indirect_lock, flags);
381 }
382
383 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
384 {
385         writel(val, tp->regs + off);
386         readl(tp->regs + off);
387 }
388
389 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
390 {
391         unsigned long flags;
392         u32 val;
393
394         spin_lock_irqsave(&tp->indirect_lock, flags);
395         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
396         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
397         spin_unlock_irqrestore(&tp->indirect_lock, flags);
398         return val;
399 }
400
401 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
402 {
403         unsigned long flags;
404
405         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
406                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
407                                        TG3_64BIT_REG_LOW, val);
408                 return;
409         }
410         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
411                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
412                                        TG3_64BIT_REG_LOW, val);
413                 return;
414         }
415
416         spin_lock_irqsave(&tp->indirect_lock, flags);
417         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
418         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
419         spin_unlock_irqrestore(&tp->indirect_lock, flags);
420
421         /* In indirect mode when disabling interrupts, we also need
422          * to clear the interrupt bit in the GRC local ctrl register.
423          */
424         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
425             (val == 0x1)) {
426                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
427                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
428         }
429 }
430
431 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
432 {
433         unsigned long flags;
434         u32 val;
435
436         spin_lock_irqsave(&tp->indirect_lock, flags);
437         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
438         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
439         spin_unlock_irqrestore(&tp->indirect_lock, flags);
440         return val;
441 }
442
443 /* usec_wait specifies the wait time in usec when writing to certain registers
444  * where it is unsafe to read back the register without some delay.
445  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
446  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
447  */
448 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
449 {
450         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
451             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
452                 /* Non-posted methods */
453                 tp->write32(tp, off, val);
454         else {
455                 /* Posted method */
456                 tg3_write32(tp, off, val);
457                 if (usec_wait)
458                         udelay(usec_wait);
459                 tp->read32(tp, off);
460         }
461         /* Wait again after the read for the posted method to guarantee that
462          * the wait time is met.
463          */
464         if (usec_wait)
465                 udelay(usec_wait);
466 }
467
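/* Editor's sketch: callers reach _tw32_flush() through the tw32_wait_f()
 * wrapper defined below when a write must not be followed too quickly by
 * further accesses.  For instance, the GPIO power sequencing later in
 * this file does
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | grc_local_ctrl, 100);
 *
 * which posts the write, reads it back (unless a non-posted workaround is
 * active), and still guarantees at least 100 usec before the next step.
 */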
468 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
469 {
470         tp->write32_mbox(tp, off, val);
471         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
472             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
473                 tp->read32_mbox(tp, off);
474 }
475
476 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
477 {
478         void __iomem *mbox = tp->regs + off;
479         writel(val, mbox);
480         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
481                 writel(val, mbox);
482         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
483                 readl(mbox);
484 }
485
486 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
487 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
488 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
489 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
490 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
491
492 #define tw32(reg,val)           tp->write32(tp, reg, val)
493 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
494 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
495 #define tr32(reg)               tp->read32(tp, reg)
496
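/* Editor's note: the tw32()/tr32() family dispatches through function
 * pointers in struct tg3 (write32, read32, write32_mbox, ...), which the
 * probe code points at either the plain MMIO helpers or the indirect
 * PCI-config-space variants above, depending on chip workarounds.  A
 * minimal sketch of that selection (assumed shape only; the real
 * assignments live in the probe path, outside this excerpt):
 *
 *	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)
 *		tp->write32 = tg3_write_indirect_reg32;
 *	else
 *		tp->write32 = tg3_write32;
 */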
497 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
498 {
499         unsigned long flags;
500
501         spin_lock_irqsave(&tp->indirect_lock, flags);
502         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
503                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
504                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
505
506                 /* Always leave this as zero. */
507                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
508         } else {
509                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
510                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
511
512                 /* Always leave this as zero. */
513                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
514         }
515         spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 }
517
518 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
519 {
520         unsigned long flags;
521
522         spin_lock_irqsave(&tp->indirect_lock, flags);
523         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
524                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
525                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
526
527                 /* Always leave this as zero. */
528                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
529         } else {
530                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
531                 *val = tr32(TG3PCI_MEM_WIN_DATA);
532
533                 /* Always leave this as zero. */
534                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
535         }
536         spin_unlock_irqrestore(&tp->indirect_lock, flags);
537 }
538
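/* Editor's sketch: tg3_read_mem()/tg3_write_mem() tunnel into NIC SRAM
 * through the memory window (program TG3PCI_MEM_WIN_BASE_ADDR, access
 * TG3PCI_MEM_WIN_DATA, then restore the base to zero, all under
 * indirect_lock).  A typical use, as seen later in this file when polling
 * the firmware status mailbox:
 *
 *	u32 val;
 *
 *	tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 *	if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 *		break;			(firmware mailbox is ready)
 */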
539 static void tg3_disable_ints(struct tg3 *tp)
540 {
541         tw32(TG3PCI_MISC_HOST_CTRL,
542              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
543         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
544 }
545
546 static inline void tg3_cond_int(struct tg3 *tp)
547 {
548         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
549             (tp->hw_status->status & SD_STATUS_UPDATED))
550                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
551 }
552
553 static void tg3_enable_ints(struct tg3 *tp)
554 {
555         tp->irq_sync = 0;
556         wmb();
557
558         tw32(TG3PCI_MISC_HOST_CTRL,
559              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
560         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
561                        (tp->last_tag << 24));
562         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
563                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
564                                (tp->last_tag << 24));
565         tg3_cond_int(tp);
566 }
567
568 static inline unsigned int tg3_has_work(struct tg3 *tp)
569 {
570         struct tg3_hw_status *sblk = tp->hw_status;
571         unsigned int work_exists = 0;
572
573         /* check for phy events */
574         if (!(tp->tg3_flags &
575               (TG3_FLAG_USE_LINKCHG_REG |
576                TG3_FLAG_POLL_SERDES))) {
577                 if (sblk->status & SD_STATUS_LINK_CHG)
578                         work_exists = 1;
579         }
580         /* check for RX/TX work to do */
581         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
582             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
583                 work_exists = 1;
584
585         return work_exists;
586 }
587
588 /* tg3_restart_ints
589  *  similar to tg3_enable_ints, but it accurately determines whether there
590  *  is new work pending and can return without flushing the PIO write
591  *  which reenables interrupts 
592  */
593 static void tg3_restart_ints(struct tg3 *tp)
594 {
595         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
596                      tp->last_tag << 24);
597         mmiowb();
598
599         /* When doing tagged status, this work check is unnecessary.
600          * The last_tag we write above tells the chip which piece of
601          * work we've completed.
602          */
603         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
604             tg3_has_work(tp))
605                 tw32(HOSTCC_MODE, tp->coalesce_mode |
606                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
607 }
608
609 static inline void tg3_netif_stop(struct tg3 *tp)
610 {
611         tp->dev->trans_start = jiffies; /* prevent tx timeout */
612         netif_poll_disable(tp->dev);
613         netif_tx_disable(tp->dev);
614 }
615
616 static inline void tg3_netif_start(struct tg3 *tp)
617 {
618         netif_wake_queue(tp->dev);
619         /* NOTE: unconditional netif_wake_queue is only appropriate
620          * so long as all callers are assured to have free tx slots
621          * (such as after tg3_init_hw)
622          */
623         netif_poll_enable(tp->dev);
624         tp->hw_status->status |= SD_STATUS_UPDATED;
625         tg3_enable_ints(tp);
626 }
627
628 static void tg3_switch_clocks(struct tg3 *tp)
629 {
630         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
631         u32 orig_clock_ctrl;
632
633         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
634                 return;
635
636         orig_clock_ctrl = clock_ctrl;
637         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
638                        CLOCK_CTRL_CLKRUN_OENABLE |
639                        0x1f);
640         tp->pci_clock_ctrl = clock_ctrl;
641
642         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
643                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
644                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
645                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
646                 }
647         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
648                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
649                             clock_ctrl |
650                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
651                             40);
652                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
653                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
654                             40);
655         }
656         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
657 }
658
659 #define PHY_BUSY_LOOPS  5000
660
661 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
662 {
663         u32 frame_val;
664         unsigned int loops;
665         int ret;
666
667         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668                 tw32_f(MAC_MI_MODE,
669                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
670                 udelay(80);
671         }
672
673         *val = 0x0;
674
675         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
676                       MI_COM_PHY_ADDR_MASK);
677         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
678                       MI_COM_REG_ADDR_MASK);
679         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
680         
681         tw32_f(MAC_MI_COM, frame_val);
682
683         loops = PHY_BUSY_LOOPS;
684         while (loops != 0) {
685                 udelay(10);
686                 frame_val = tr32(MAC_MI_COM);
687
688                 if ((frame_val & MI_COM_BUSY) == 0) {
689                         udelay(5);
690                         frame_val = tr32(MAC_MI_COM);
691                         break;
692                 }
693                 loops -= 1;
694         }
695
696         ret = -EBUSY;
697         if (loops != 0) {
698                 *val = frame_val & MI_COM_DATA_MASK;
699                 ret = 0;
700         }
701
702         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
703                 tw32_f(MAC_MI_MODE, tp->mi_mode);
704                 udelay(80);
705         }
706
707         return ret;
708 }
709
710 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
711 {
712         u32 frame_val;
713         unsigned int loops;
714         int ret;
715
716         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717                 tw32_f(MAC_MI_MODE,
718                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
719                 udelay(80);
720         }
721
722         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723                       MI_COM_PHY_ADDR_MASK);
724         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725                       MI_COM_REG_ADDR_MASK);
726         frame_val |= (val & MI_COM_DATA_MASK);
727         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
728         
729         tw32_f(MAC_MI_COM, frame_val);
730
731         loops = PHY_BUSY_LOOPS;
732         while (loops != 0) {
733                 udelay(10);
734                 frame_val = tr32(MAC_MI_COM);
735                 if ((frame_val & MI_COM_BUSY) == 0) {
736                         udelay(5);
737                         frame_val = tr32(MAC_MI_COM);
738                         break;
739                 }
740                 loops -= 1;
741         }
742
743         ret = -EBUSY;
744         if (loops != 0)
745                 ret = 0;
746
747         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
748                 tw32_f(MAC_MI_MODE, tp->mi_mode);
749                 udelay(80);
750         }
751
752         return ret;
753 }
754
755 static void tg3_phy_set_wirespeed(struct tg3 *tp)
756 {
757         u32 val;
758
759         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
760                 return;
761
762         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
763             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
764                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
765                              (val | (1 << 15) | (1 << 4)));
766 }
767
768 static int tg3_bmcr_reset(struct tg3 *tp)
769 {
770         u32 phy_control;
771         int limit, err;
772
773         /* OK, reset it, and poll the BMCR_RESET bit until it
774          * clears or we time out.
775          */
776         phy_control = BMCR_RESET;
777         err = tg3_writephy(tp, MII_BMCR, phy_control);
778         if (err != 0)
779                 return -EBUSY;
780
781         limit = 5000;
782         while (limit--) {
783                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
784                 if (err != 0)
785                         return -EBUSY;
786
787                 if ((phy_control & BMCR_RESET) == 0) {
788                         udelay(40);
789                         break;
790                 }
791                 udelay(10);
792         }
793         if (limit <= 0)
794                 return -EBUSY;
795
796         return 0;
797 }
798
799 static int tg3_wait_macro_done(struct tg3 *tp)
800 {
801         int limit = 100;
802
803         while (limit--) {
804                 u32 tmp32;
805
806                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
807                         if ((tmp32 & 0x1000) == 0)
808                                 break;
809                 }
810         }
811         if (limit <= 0)
812                 return -EBUSY;
813
814         return 0;
815 }
816
817 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
818 {
819         static const u32 test_pat[4][6] = {
820         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
821         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
822         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
823         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
824         };
825         int chan;
826
827         for (chan = 0; chan < 4; chan++) {
828                 int i;
829
830                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
831                              (chan * 0x2000) | 0x0200);
832                 tg3_writephy(tp, 0x16, 0x0002);
833
834                 for (i = 0; i < 6; i++)
835                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
836                                      test_pat[chan][i]);
837
838                 tg3_writephy(tp, 0x16, 0x0202);
839                 if (tg3_wait_macro_done(tp)) {
840                         *resetp = 1;
841                         return -EBUSY;
842                 }
843
844                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
845                              (chan * 0x2000) | 0x0200);
846                 tg3_writephy(tp, 0x16, 0x0082);
847                 if (tg3_wait_macro_done(tp)) {
848                         *resetp = 1;
849                         return -EBUSY;
850                 }
851
852                 tg3_writephy(tp, 0x16, 0x0802);
853                 if (tg3_wait_macro_done(tp)) {
854                         *resetp = 1;
855                         return -EBUSY;
856                 }
857
858                 for (i = 0; i < 6; i += 2) {
859                         u32 low, high;
860
861                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
862                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
863                             tg3_wait_macro_done(tp)) {
864                                 *resetp = 1;
865                                 return -EBUSY;
866                         }
867                         low &= 0x7fff;
868                         high &= 0x000f;
869                         if (low != test_pat[chan][i] ||
870                             high != test_pat[chan][i+1]) {
871                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
872                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
873                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
874
875                                 return -EBUSY;
876                         }
877                 }
878         }
879
880         return 0;
881 }
882
883 static int tg3_phy_reset_chanpat(struct tg3 *tp)
884 {
885         int chan;
886
887         for (chan = 0; chan < 4; chan++) {
888                 int i;
889
890                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
891                              (chan * 0x2000) | 0x0200);
892                 tg3_writephy(tp, 0x16, 0x0002);
893                 for (i = 0; i < 6; i++)
894                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
895                 tg3_writephy(tp, 0x16, 0x0202);
896                 if (tg3_wait_macro_done(tp))
897                         return -EBUSY;
898         }
899
900         return 0;
901 }
902
903 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
904 {
905         u32 reg32, phy9_orig;
906         int retries, do_phy_reset, err;
907
908         retries = 10;
909         do_phy_reset = 1;
910         do {
911                 if (do_phy_reset) {
912                         err = tg3_bmcr_reset(tp);
913                         if (err)
914                                 return err;
915                         do_phy_reset = 0;
916                 }
917
918                 /* Disable transmitter and interrupt.  */
919                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
920                         continue;
921
922                 reg32 |= 0x3000;
923                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
924
925                 /* Set full-duplex, 1000 mbps.  */
926                 tg3_writephy(tp, MII_BMCR,
927                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
928
929                 /* Set to master mode.  */
930                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
931                         continue;
932
933                 tg3_writephy(tp, MII_TG3_CTRL,
934                              (MII_TG3_CTRL_AS_MASTER |
935                               MII_TG3_CTRL_ENABLE_AS_MASTER));
936
937                 /* Enable SM_DSP_CLOCK and 6dB.  */
938                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
939
940                 /* Block the PHY control access.  */
941                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
942                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
943
944                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
945                 if (!err)
946                         break;
947         } while (--retries);
948
949         err = tg3_phy_reset_chanpat(tp);
950         if (err)
951                 return err;
952
953         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
954         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
955
956         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
957         tg3_writephy(tp, 0x16, 0x0000);
958
959         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
961                 /* Set Extended packet length bit for jumbo frames */
962                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
963         }
964         else {
965                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
966         }
967
968         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
969
970         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
971                 reg32 &= ~0x3000;
972                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
973         } else if (!err)
974                 err = -EBUSY;
975
976         return err;
977 }
978
979 static void tg3_link_report(struct tg3 *);
980
981 /* Reset the tigon3 PHY, then reapply the chip-specific
982  * PHY workaround register settings.
983  */
984 static int tg3_phy_reset(struct tg3 *tp)
985 {
986         u32 phy_status;
987         int err;
988
989         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
990         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
991         if (err != 0)
992                 return -EBUSY;
993
994         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
995                 netif_carrier_off(tp->dev);
996                 tg3_link_report(tp);
997         }
998
999         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1002                 err = tg3_phy_reset_5703_4_5(tp);
1003                 if (err)
1004                         return err;
1005                 goto out;
1006         }
1007
1008         err = tg3_bmcr_reset(tp);
1009         if (err)
1010                 return err;
1011
1012 out:
1013         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1014                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1015                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1016                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1017                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1018                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1019                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1020         }
1021         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1022                 tg3_writephy(tp, 0x1c, 0x8d68);
1023                 tg3_writephy(tp, 0x1c, 0x8d68);
1024         }
1025         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1026                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1027                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1028                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1029                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1030                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1031                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1032                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1033                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1034         }
1035         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1036                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1037                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1038                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1039                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1040         }
1041         /* Set Extended packet length bit (bit 14) on all chips
1042          * that support jumbo frames. */
1043         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1044                 /* Cannot do read-modify-write on 5401 */
1045                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1046         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1047                 u32 phy_reg;
1048
1049                 /* Set bit 14 with read-modify-write to preserve other bits */
1050                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1051                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1052                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1053         }
1054
1055         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1056          * jumbo frames transmission.
1057          */
1058         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1059                 u32 phy_reg;
1060
1061                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1062                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1063                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1064         }
1065
1066         tg3_phy_set_wirespeed(tp);
1067         return 0;
1068 }
1069
1070 static void tg3_frob_aux_power(struct tg3 *tp)
1071 {
1072         struct tg3 *tp_peer = tp;
1073
1074         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1075                 return;
1076
1077         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1078             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1079                 struct net_device *dev_peer;
1080
1081                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1082                 /* remove_one() may have been run on the peer. */
1083                 if (!dev_peer)
1084                         tp_peer = tp;
1085                 else
1086                         tp_peer = netdev_priv(dev_peer);
1087         }
1088
1089         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1090             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1091             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1092             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1093                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1094                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1095                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1096                                     (GRC_LCLCTRL_GPIO_OE0 |
1097                                      GRC_LCLCTRL_GPIO_OE1 |
1098                                      GRC_LCLCTRL_GPIO_OE2 |
1099                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1100                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1101                                     100);
1102                 } else {
1103                         u32 no_gpio2;
1104                         u32 grc_local_ctrl = 0;
1105
1106                         if (tp_peer != tp &&
1107                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1108                                 return;
1109
1110                         /* Workaround to prevent overdrawing Amps. */
1111                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1112                             ASIC_REV_5714) {
1113                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1114                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1115                                             grc_local_ctrl, 100);
1116                         }
1117
1118                         /* On 5753 and variants, GPIO2 cannot be used. */
1119                         no_gpio2 = tp->nic_sram_data_cfg &
1120                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1121
1122                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1123                                          GRC_LCLCTRL_GPIO_OE1 |
1124                                          GRC_LCLCTRL_GPIO_OE2 |
1125                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1126                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1127                         if (no_gpio2) {
1128                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1129                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1130                         }
1131                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1132                                                     grc_local_ctrl, 100);
1133
1134                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1135
1136                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1137                                                     grc_local_ctrl, 100);
1138
1139                         if (!no_gpio2) {
1140                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1141                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1142                                             grc_local_ctrl, 100);
1143                         }
1144                 }
1145         } else {
1146                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1147                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1148                         if (tp_peer != tp &&
1149                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1150                                 return;
1151
1152                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1153                                     (GRC_LCLCTRL_GPIO_OE1 |
1154                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1155
1156                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1157                                     GRC_LCLCTRL_GPIO_OE1, 100);
1158
1159                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1160                                     (GRC_LCLCTRL_GPIO_OE1 |
1161                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1162                 }
1163         }
1164 }
1165
1166 static int tg3_setup_phy(struct tg3 *, int);
1167
1168 #define RESET_KIND_SHUTDOWN     0
1169 #define RESET_KIND_INIT         1
1170 #define RESET_KIND_SUSPEND      2
1171
1172 static void tg3_write_sig_post_reset(struct tg3 *, int);
1173 static int tg3_halt_cpu(struct tg3 *, u32);
1174 static int tg3_nvram_lock(struct tg3 *);
1175 static void tg3_nvram_unlock(struct tg3 *);
1176
1177 static void tg3_power_down_phy(struct tg3 *tp)
1178 {
1179         /* The PHY should not be powered down on some chips because
1180          * of bugs.
1181          */
1182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1183             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1184             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1185              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186                 return;
1187         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1188 }
1189
1190 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1191 {
1192         u32 misc_host_ctrl;
1193         u16 power_control, power_caps;
1194         int pm = tp->pm_cap;
1195
1196         /* Make sure register accesses (indirect or otherwise)
1197          * will function correctly.
1198          */
1199         pci_write_config_dword(tp->pdev,
1200                                TG3PCI_MISC_HOST_CTRL,
1201                                tp->misc_host_ctrl);
1202
1203         pci_read_config_word(tp->pdev,
1204                              pm + PCI_PM_CTRL,
1205                              &power_control);
1206         power_control |= PCI_PM_CTRL_PME_STATUS;
1207         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1208         switch (state) {
1209         case PCI_D0:
1210                 power_control |= 0;
1211                 pci_write_config_word(tp->pdev,
1212                                       pm + PCI_PM_CTRL,
1213                                       power_control);
1214                 udelay(100);    /* Delay after power state change */
1215
1216                 /* Switch out of Vaux if it is not a LOM */
1217                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1218                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1219
1220                 return 0;
1221
1222         case PCI_D1:
1223                 power_control |= 1;
1224                 break;
1225
1226         case PCI_D2:
1227                 power_control |= 2;
1228                 break;
1229
1230         case PCI_D3hot:
1231                 power_control |= 3;
1232                 break;
1233
1234         default:
1235                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1236                        "requested.\n",
1237                        tp->dev->name, state);
1238                 return -EINVAL;
1239         }
1240
1241         power_control |= PCI_PM_CTRL_PME_ENABLE;
1242
1243         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1244         tw32(TG3PCI_MISC_HOST_CTRL,
1245              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1246
1247         if (tp->link_config.phy_is_low_power == 0) {
1248                 tp->link_config.phy_is_low_power = 1;
1249                 tp->link_config.orig_speed = tp->link_config.speed;
1250                 tp->link_config.orig_duplex = tp->link_config.duplex;
1251                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1252         }
1253
1254         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1255                 tp->link_config.speed = SPEED_10;
1256                 tp->link_config.duplex = DUPLEX_HALF;
1257                 tp->link_config.autoneg = AUTONEG_ENABLE;
1258                 tg3_setup_phy(tp, 0);
1259         }
1260
1261         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1262                 int i;
1263                 u32 val;
1264
1265                 for (i = 0; i < 200; i++) {
1266                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1267                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1268                                 break;
1269                         msleep(1);
1270                 }
1271         }
1272         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1273                                              WOL_DRV_STATE_SHUTDOWN |
1274                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1275
1276         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1277
1278         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1279                 u32 mac_mode;
1280
1281                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1282                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1283                         udelay(40);
1284
1285                         mac_mode = MAC_MODE_PORT_MODE_MII;
1286
1287                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1288                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1289                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1290                 } else {
1291                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1292                 }
1293
1294                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1295                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1296
1297                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1298                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1299                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1300
1301                 tw32_f(MAC_MODE, mac_mode);
1302                 udelay(100);
1303
1304                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1305                 udelay(10);
1306         }
1307
1308         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1309             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1310              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1311                 u32 base_val;
1312
1313                 base_val = tp->pci_clock_ctrl;
1314                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1315                              CLOCK_CTRL_TXCLK_DISABLE);
1316
1317                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1318                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1319         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1320                 /* do nothing */
1321         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1322                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1323                 u32 newbits1, newbits2;
1324
1325                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1326                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1327                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1328                                     CLOCK_CTRL_TXCLK_DISABLE |
1329                                     CLOCK_CTRL_ALTCLK);
1330                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1331                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1332                         newbits1 = CLOCK_CTRL_625_CORE;
1333                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1334                 } else {
1335                         newbits1 = CLOCK_CTRL_ALTCLK;
1336                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1337                 }
1338
1339                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1340                             40);
1341
1342                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1343                             40);
1344
1345                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1346                         u32 newbits3;
1347
1348                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1349                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1350                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1351                                             CLOCK_CTRL_TXCLK_DISABLE |
1352                                             CLOCK_CTRL_44MHZ_CORE);
1353                         } else {
1354                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1355                         }
1356
1357                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1358                                     tp->pci_clock_ctrl | newbits3, 40);
1359                 }
1360         }
1361
1362         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1363             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1364                 /* Turn off the PHY */
1365                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1366                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1367                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1368                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1369                         tg3_power_down_phy(tp);
1370                 }
1371         }
1372
1373         tg3_frob_aux_power(tp);
1374
1375         /* Workaround for unstable PLL clock */
1376         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1377             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1378                 u32 val = tr32(0x7d00);
1379
1380                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1381                 tw32(0x7d00, val);
1382                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1383                         int err;
1384
1385                         err = tg3_nvram_lock(tp);
1386                         tg3_halt_cpu(tp, RX_CPU_BASE);
1387                         if (!err)
1388                                 tg3_nvram_unlock(tp);
1389                 }
1390         }
1391
1392         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1393
1394         /* Finally, set the new power state. */
1395         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1396         udelay(100);    /* Delay after power state change */
1397
1398         return 0;
1399 }
1400
1401 static void tg3_link_report(struct tg3 *tp)
1402 {
1403         if (!netif_carrier_ok(tp->dev)) {
1404                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405         } else {
1406                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407                        tp->dev->name,
1408                        (tp->link_config.active_speed == SPEED_1000 ?
1409                         1000 :
1410                         (tp->link_config.active_speed == SPEED_100 ?
1411                          100 : 10)),
1412                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1413                         "full" : "half"));
1414
1415                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1416                        "%s for RX.\n",
1417                        tp->dev->name,
1418                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1419                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1420         }
1421 }
1422
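/* Resolve TX/RX pause (flow control) from the local and remote autoneg
 * advertisements and program MAC_RX_MODE/MAC_TX_MODE to match.  For MII
 * serdes devices the 1000BASE-X pause bits are first mapped onto their
 * 1000BASE-T equivalents so one resolution path covers both.
 */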
1423 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1424 {
1425         u32 new_tg3_flags = 0;
1426         u32 old_rx_mode = tp->rx_mode;
1427         u32 old_tx_mode = tp->tx_mode;
1428
1429         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1430
1431                 /* Convert 1000BaseX flow control bits to 1000BaseT
1432                  * bits before resolving flow control.
1433                  */
1434                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1435                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1436                                        ADVERTISE_PAUSE_ASYM);
1437                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1438
1439                         if (local_adv & ADVERTISE_1000XPAUSE)
1440                                 local_adv |= ADVERTISE_PAUSE_CAP;
1441                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1442                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1443                         if (remote_adv & LPA_1000XPAUSE)
1444                                 remote_adv |= LPA_PAUSE_CAP;
1445                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1446                                 remote_adv |= LPA_PAUSE_ASYM;
1447                 }
1448
1449                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1450                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1451                                 if (remote_adv & LPA_PAUSE_CAP)
1452                                         new_tg3_flags |=
1453                                                 (TG3_FLAG_RX_PAUSE |
1454                                                 TG3_FLAG_TX_PAUSE);
1455                                 else if (remote_adv & LPA_PAUSE_ASYM)
1456                                         new_tg3_flags |=
1457                                                 (TG3_FLAG_RX_PAUSE);
1458                         } else {
1459                                 if (remote_adv & LPA_PAUSE_CAP)
1460                                         new_tg3_flags |=
1461                                                 (TG3_FLAG_RX_PAUSE |
1462                                                 TG3_FLAG_TX_PAUSE);
1463                         }
1464                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1465                         if ((remote_adv & LPA_PAUSE_CAP) &&
1466                         (remote_adv & LPA_PAUSE_ASYM))
1467                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1468                 }
1469
1470                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1471                 tp->tg3_flags |= new_tg3_flags;
1472         } else {
1473                 new_tg3_flags = tp->tg3_flags;
1474         }
1475
1476         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1477                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1478         else
1479                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1480
1481         if (old_rx_mode != tp->rx_mode) {
1482                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1483         }
1484
1485         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1486                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1487         else
1488                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1489
1490         if (old_tx_mode != tp->tx_mode) {
1491                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1492         }
1493 }
1494
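/* Translate the speed/duplex field of the PHY auxiliary status register
 * into SPEED_* and DUPLEX_* values.
 */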
1495 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496 {
1497         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498         case MII_TG3_AUX_STAT_10HALF:
1499                 *speed = SPEED_10;
1500                 *duplex = DUPLEX_HALF;
1501                 break;
1502
1503         case MII_TG3_AUX_STAT_10FULL:
1504                 *speed = SPEED_10;
1505                 *duplex = DUPLEX_FULL;
1506                 break;
1507
1508         case MII_TG3_AUX_STAT_100HALF:
1509                 *speed = SPEED_100;
1510                 *duplex = DUPLEX_HALF;
1511                 break;
1512
1513         case MII_TG3_AUX_STAT_100FULL:
1514                 *speed = SPEED_100;
1515                 *duplex = DUPLEX_FULL;
1516                 break;
1517
1518         case MII_TG3_AUX_STAT_1000HALF:
1519                 *speed = SPEED_1000;
1520                 *duplex = DUPLEX_HALF;
1521                 break;
1522
1523         case MII_TG3_AUX_STAT_1000FULL:
1524                 *speed = SPEED_1000;
1525                 *duplex = DUPLEX_FULL;
1526                 break;
1527
1528         default:
1529                 *speed = SPEED_INVALID;
1530                 *duplex = DUPLEX_INVALID;
1531                 break;
1532         }
1533 }
1534
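/* Program the copper PHY advertisement registers from tp->link_config,
 * then either force the requested speed/duplex or (re)start
 * autonegotiation.
 */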
1535 static void tg3_phy_copper_begin(struct tg3 *tp)
1536 {
1537         u32 new_adv;
1538         int i;
1539
1540         if (tp->link_config.phy_is_low_power) {
1541                 /* Entering low power mode.  Disable gigabit and
1542                  * 100baseT advertisements.
1543                  */
1544                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1545
1546                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1547                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1548                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1549                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1550
1551                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552         } else if (tp->link_config.speed == SPEED_INVALID) {
1553                 tp->link_config.advertising =
1554                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1555                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1556                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1557                          ADVERTISED_Autoneg | ADVERTISED_MII);
1558
1559                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1560                         tp->link_config.advertising &=
1561                                 ~(ADVERTISED_1000baseT_Half |
1562                                   ADVERTISED_1000baseT_Full);
1563
1564                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1565                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1566                         new_adv |= ADVERTISE_10HALF;
1567                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1568                         new_adv |= ADVERTISE_10FULL;
1569                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1570                         new_adv |= ADVERTISE_100HALF;
1571                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1572                         new_adv |= ADVERTISE_100FULL;
1573                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574
1575                 if (tp->link_config.advertising &
1576                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1577                         new_adv = 0;
1578                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1579                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1580                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1581                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1582                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1583                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1584                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1585                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1586                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1587                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1588                 } else {
1589                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1590                 }
1591         } else {
1592                 /* Asking for a specific link mode. */
1593                 if (tp->link_config.speed == SPEED_1000) {
1594                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1596
1597                         if (tp->link_config.duplex == DUPLEX_FULL)
1598                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1599                         else
1600                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1601                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1602                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1603                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1604                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1605                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1606                 } else {
1607                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1608
1609                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1610                         if (tp->link_config.speed == SPEED_100) {
1611                                 if (tp->link_config.duplex == DUPLEX_FULL)
1612                                         new_adv |= ADVERTISE_100FULL;
1613                                 else
1614                                         new_adv |= ADVERTISE_100HALF;
1615                         } else {
1616                                 if (tp->link_config.duplex == DUPLEX_FULL)
1617                                         new_adv |= ADVERTISE_10FULL;
1618                                 else
1619                                         new_adv |= ADVERTISE_10HALF;
1620                         }
1621                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1622                 }
1623         }
1624
1625         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1626             tp->link_config.speed != SPEED_INVALID) {
1627                 u32 bmcr, orig_bmcr;
1628
1629                 tp->link_config.active_speed = tp->link_config.speed;
1630                 tp->link_config.active_duplex = tp->link_config.duplex;
1631
1632                 bmcr = 0;
1633                 switch (tp->link_config.speed) {
1634                 default:
1635                 case SPEED_10:
1636                         break;
1637
1638                 case SPEED_100:
1639                         bmcr |= BMCR_SPEED100;
1640                         break;
1641
1642                 case SPEED_1000:
1643                         bmcr |= TG3_BMCR_SPEED1000;
1644                         break;
1645                 }
1646
1647                 if (tp->link_config.duplex == DUPLEX_FULL)
1648                         bmcr |= BMCR_FULLDPLX;
1649
1650                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1651                     (bmcr != orig_bmcr)) {
1652                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1653                         for (i = 0; i < 1500; i++) {
1654                                 u32 tmp;
1655
1656                                 udelay(10);
1657                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1658                                     tg3_readphy(tp, MII_BMSR, &tmp))
1659                                         continue;
1660                                 if (!(tmp & BMSR_LSTATUS)) {
1661                                         udelay(40);
1662                                         break;
1663                                 }
1664                         }
1665                         tg3_writephy(tp, MII_BMCR, bmcr);
1666                         udelay(40);
1667                 }
1668         } else {
1669                 tg3_writephy(tp, MII_BMCR,
1670                              BMCR_ANENABLE | BMCR_ANRESTART);
1671         }
1672 }
1673
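/* Apply the BCM5401 DSP fixups via the DSP address/data register pair. */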
1674 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1675 {
1676         int err;
1677
1678         /* Turn off tap power management. */
1679         /* Set Extended packet length bit */
1680         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1681
1682         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1683         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1684
1685         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1686         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1687
1688         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1689         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1690
1691         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1692         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1693
1694         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1695         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1696
1697         udelay(40);
1698
1699         return err;
1700 }
1701
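/* Return 1 if the PHY is advertising every speed/duplex mode this device
 * supports, 0 otherwise.
 */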
1702 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703 {
1704         u32 adv_reg, all_mask;
1705
1706         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1707                 return 0;
1708
1709         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1710                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1711         if ((adv_reg & all_mask) != all_mask)
1712                 return 0;
1713         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1714                 u32 tg3_ctrl;
1715
1716                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1717                         return 0;
1718
1719                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1720                             MII_TG3_CTRL_ADV_1000_FULL);
1721                 if ((tg3_ctrl & all_mask) != all_mask)
1722                         return 0;
1723         }
1724         return 1;
1725 }
1726
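/* Establish (or re-check) the link on a copper PHY: apply chip and PHY
 * specific workarounds, poll BMSR and the aux status register for the
 * negotiated speed/duplex, resolve flow control, program MAC_MODE and
 * update the carrier state.
 */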
1727 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1728 {
1729         int current_link_up;
1730         u32 bmsr, dummy;
1731         u16 current_speed;
1732         u8 current_duplex;
1733         int i, err;
1734
1735         tw32(MAC_EVENT, 0);
1736
1737         tw32_f(MAC_STATUS,
1738              (MAC_STATUS_SYNC_CHANGED |
1739               MAC_STATUS_CFG_CHANGED |
1740               MAC_STATUS_MI_COMPLETION |
1741               MAC_STATUS_LNKSTATE_CHANGED));
1742         udelay(40);
1743
1744         tp->mi_mode = MAC_MI_MODE_BASE;
1745         tw32_f(MAC_MI_MODE, tp->mi_mode);
1746         udelay(80);
1747
1748         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1749
1750         /* Some third-party PHYs need to be reset on link going
1751          * down.
1752          */
1753         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1754              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1755              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1756             netif_carrier_ok(tp->dev)) {
1757                 tg3_readphy(tp, MII_BMSR, &bmsr);
1758                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1759                     !(bmsr & BMSR_LSTATUS))
1760                         force_reset = 1;
1761         }
1762         if (force_reset)
1763                 tg3_phy_reset(tp);
1764
1765         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1766                 tg3_readphy(tp, MII_BMSR, &bmsr);
1767                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1768                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1769                         bmsr = 0;
1770
1771                 if (!(bmsr & BMSR_LSTATUS)) {
1772                         err = tg3_init_5401phy_dsp(tp);
1773                         if (err)
1774                                 return err;
1775
1776                         tg3_readphy(tp, MII_BMSR, &bmsr);
1777                         for (i = 0; i < 1000; i++) {
1778                                 udelay(10);
1779                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1780                                     (bmsr & BMSR_LSTATUS)) {
1781                                         udelay(40);
1782                                         break;
1783                                 }
1784                         }
1785
1786                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1787                             !(bmsr & BMSR_LSTATUS) &&
1788                             tp->link_config.active_speed == SPEED_1000) {
1789                                 err = tg3_phy_reset(tp);
1790                                 if (!err)
1791                                         err = tg3_init_5401phy_dsp(tp);
1792                                 if (err)
1793                                         return err;
1794                         }
1795                 }
1796         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1797                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1798                 /* 5701 {A0,B0} CRC bug workaround */
1799                 tg3_writephy(tp, 0x15, 0x0a75);
1800                 tg3_writephy(tp, 0x1c, 0x8c68);
1801                 tg3_writephy(tp, 0x1c, 0x8d68);
1802                 tg3_writephy(tp, 0x1c, 0x8c68);
1803         }
1804
1805         /* Clear pending interrupts... */
1806         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1808
1809         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1810                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1811         else
1812                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1813
1814         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1815             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1816                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1817                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1819                 else
1820                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1821         }
1822
1823         current_link_up = 0;
1824         current_speed = SPEED_INVALID;
1825         current_duplex = DUPLEX_INVALID;
1826
1827         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1828                 u32 val;
1829
1830                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1831                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1832                 if (!(val & (1 << 10))) {
1833                         val |= (1 << 10);
1834                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1835                         goto relink;
1836                 }
1837         }
1838
1839         bmsr = 0;
1840         for (i = 0; i < 100; i++) {
1841                 tg3_readphy(tp, MII_BMSR, &bmsr);
1842                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1843                     (bmsr & BMSR_LSTATUS))
1844                         break;
1845                 udelay(40);
1846         }
1847
1848         if (bmsr & BMSR_LSTATUS) {
1849                 u32 aux_stat, bmcr;
1850
1851                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1852                 for (i = 0; i < 2000; i++) {
1853                         udelay(10);
1854                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1855                             aux_stat)
1856                                 break;
1857                 }
1858
1859                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1860                                              &current_speed,
1861                                              &current_duplex);
1862
1863                 bmcr = 0;
1864                 for (i = 0; i < 200; i++) {
1865                         tg3_readphy(tp, MII_BMCR, &bmcr);
1866                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1867                                 continue;
1868                         if (bmcr && bmcr != 0x7fff)
1869                                 break;
1870                         udelay(10);
1871                 }
1872
1873                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1874                         if (bmcr & BMCR_ANENABLE) {
1875                                 current_link_up = 1;
1876
1877                                 /* Force autoneg restart if we are exiting
1878                                  * low power mode.
1879                                  */
1880                                 if (!tg3_copper_is_advertising_all(tp))
1881                                         current_link_up = 0;
1882                         } else {
1883                                 current_link_up = 0;
1884                         }
1885                 } else {
1886                         if (!(bmcr & BMCR_ANENABLE) &&
1887                             tp->link_config.speed == current_speed &&
1888                             tp->link_config.duplex == current_duplex) {
1889                                 current_link_up = 1;
1890                         } else {
1891                                 current_link_up = 0;
1892                         }
1893                 }
1894
1895                 tp->link_config.active_speed = current_speed;
1896                 tp->link_config.active_duplex = current_duplex;
1897         }
1898
1899         if (current_link_up == 1 &&
1900             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1901             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1902                 u32 local_adv, remote_adv;
1903
1904                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1905                         local_adv = 0;
1906                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1907
1908                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1909                         remote_adv = 0;
1910
1911                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1912
1913                 /* If we are not advertising full pause capability,
1914                  * something is wrong.  Bring the link down and reconfigure.
1915                  */
1916                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1917                         current_link_up = 0;
1918                 } else {
1919                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1920                 }
1921         }
1922 relink:
1923         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1924                 u32 tmp;
1925
1926                 tg3_phy_copper_begin(tp);
1927
1928                 tg3_readphy(tp, MII_BMSR, &tmp);
1929                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1930                     (tmp & BMSR_LSTATUS))
1931                         current_link_up = 1;
1932         }
1933
1934         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1935         if (current_link_up == 1) {
1936                 if (tp->link_config.active_speed == SPEED_100 ||
1937                     tp->link_config.active_speed == SPEED_10)
1938                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1939                 else
1940                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1941         } else
1942                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1943
1944         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1945         if (tp->link_config.active_duplex == DUPLEX_HALF)
1946                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1947
1948         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1949         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1950                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1951                     (current_link_up == 1 &&
1952                      tp->link_config.active_speed == SPEED_10))
1953                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1954         } else {
1955                 if (current_link_up == 1)
1956                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1957         }
1958
1959         /* ??? Without this setting Netgear GA302T PHY does not
1960          * ??? send/receive packets...
1961          */
1962         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1963             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1964                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1965                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1966                 udelay(80);
1967         }
1968
1969         tw32_f(MAC_MODE, tp->mac_mode);
1970         udelay(40);
1971
1972         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1973                 /* Polled via timer. */
1974                 tw32_f(MAC_EVENT, 0);
1975         } else {
1976                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1977         }
1978         udelay(40);
1979
1980         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1981             current_link_up == 1 &&
1982             tp->link_config.active_speed == SPEED_1000 &&
1983             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1984              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1985                 udelay(120);
1986                 tw32_f(MAC_STATUS,
1987                      (MAC_STATUS_SYNC_CHANGED |
1988                       MAC_STATUS_CFG_CHANGED));
1989                 udelay(40);
1990                 tg3_write_mem(tp,
1991                               NIC_SRAM_FIRMWARE_MBOX,
1992                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1993         }
1994
1995         if (current_link_up != netif_carrier_ok(tp->dev)) {
1996                 if (current_link_up)
1997                         netif_carrier_on(tp->dev);
1998                 else
1999                         netif_carrier_off(tp->dev);
2000                 tg3_link_report(tp);
2001         }
2002
2003         return 0;
2004 }
2005
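/* State for the software 1000BASE-X autonegotiation state machine, used
 * on fiber links when the MAC's hardware autoneg is not enabled.
 */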
2006 struct tg3_fiber_aneginfo {
2007         int state;
2008 #define ANEG_STATE_UNKNOWN              0
2009 #define ANEG_STATE_AN_ENABLE            1
2010 #define ANEG_STATE_RESTART_INIT         2
2011 #define ANEG_STATE_RESTART              3
2012 #define ANEG_STATE_DISABLE_LINK_OK      4
2013 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2014 #define ANEG_STATE_ABILITY_DETECT       6
2015 #define ANEG_STATE_ACK_DETECT_INIT      7
2016 #define ANEG_STATE_ACK_DETECT           8
2017 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2018 #define ANEG_STATE_COMPLETE_ACK         10
2019 #define ANEG_STATE_IDLE_DETECT_INIT     11
2020 #define ANEG_STATE_IDLE_DETECT          12
2021 #define ANEG_STATE_LINK_OK              13
2022 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2023 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2024
2025         u32 flags;
2026 #define MR_AN_ENABLE            0x00000001
2027 #define MR_RESTART_AN           0x00000002
2028 #define MR_AN_COMPLETE          0x00000004
2029 #define MR_PAGE_RX              0x00000008
2030 #define MR_NP_LOADED            0x00000010
2031 #define MR_TOGGLE_TX            0x00000020
2032 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2033 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2034 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2035 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2036 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2037 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2038 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2039 #define MR_TOGGLE_RX            0x00002000
2040 #define MR_NP_RX                0x00004000
2041
2042 #define MR_LINK_OK              0x80000000
2043
2044         unsigned long link_time, cur_time;
2045
2046         u32 ability_match_cfg;
2047         int ability_match_count;
2048
2049         char ability_match, idle_match, ack_match;
2050
2051         u32 txconfig, rxconfig;
2052 #define ANEG_CFG_NP             0x00000080
2053 #define ANEG_CFG_ACK            0x00000040
2054 #define ANEG_CFG_RF2            0x00000020
2055 #define ANEG_CFG_RF1            0x00000010
2056 #define ANEG_CFG_PS2            0x00000001
2057 #define ANEG_CFG_PS1            0x00008000
2058 #define ANEG_CFG_HD             0x00004000
2059 #define ANEG_CFG_FD             0x00002000
2060 #define ANEG_CFG_INVAL          0x00001f06
2061
2062 };
2063 #define ANEG_OK         0
2064 #define ANEG_DONE       1
2065 #define ANEG_TIMER_ENAB 2
2066 #define ANEG_FAILED     -1
2067
2068 #define ANEG_STATE_SETTLE_TIME  10000
2069
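/* Advance the software fiber autoneg state machine by one step.
 * Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */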
2070 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2071                                    struct tg3_fiber_aneginfo *ap)
2072 {
2073         unsigned long delta;
2074         u32 rx_cfg_reg;
2075         int ret;
2076
2077         if (ap->state == ANEG_STATE_UNKNOWN) {
2078                 ap->rxconfig = 0;
2079                 ap->link_time = 0;
2080                 ap->cur_time = 0;
2081                 ap->ability_match_cfg = 0;
2082                 ap->ability_match_count = 0;
2083                 ap->ability_match = 0;
2084                 ap->idle_match = 0;
2085                 ap->ack_match = 0;
2086         }
2087         ap->cur_time++;
2088
2089         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2090                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2091
2092                 if (rx_cfg_reg != ap->ability_match_cfg) {
2093                         ap->ability_match_cfg = rx_cfg_reg;
2094                         ap->ability_match = 0;
2095                         ap->ability_match_count = 0;
2096                 } else {
2097                         if (++ap->ability_match_count > 1) {
2098                                 ap->ability_match = 1;
2099                                 ap->ability_match_cfg = rx_cfg_reg;
2100                         }
2101                 }
2102                 if (rx_cfg_reg & ANEG_CFG_ACK)
2103                         ap->ack_match = 1;
2104                 else
2105                         ap->ack_match = 0;
2106
2107                 ap->idle_match = 0;
2108         } else {
2109                 ap->idle_match = 1;
2110                 ap->ability_match_cfg = 0;
2111                 ap->ability_match_count = 0;
2112                 ap->ability_match = 0;
2113                 ap->ack_match = 0;
2114
2115                 rx_cfg_reg = 0;
2116         }
2117
2118         ap->rxconfig = rx_cfg_reg;
2119         ret = ANEG_OK;
2120
2121         switch(ap->state) {
2122         case ANEG_STATE_UNKNOWN:
2123                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2124                         ap->state = ANEG_STATE_AN_ENABLE;
2125
2126                 /* fallthru */
2127         case ANEG_STATE_AN_ENABLE:
2128                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2129                 if (ap->flags & MR_AN_ENABLE) {
2130                         ap->link_time = 0;
2131                         ap->cur_time = 0;
2132                         ap->ability_match_cfg = 0;
2133                         ap->ability_match_count = 0;
2134                         ap->ability_match = 0;
2135                         ap->idle_match = 0;
2136                         ap->ack_match = 0;
2137
2138                         ap->state = ANEG_STATE_RESTART_INIT;
2139                 } else {
2140                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2141                 }
2142                 break;
2143
2144         case ANEG_STATE_RESTART_INIT:
2145                 ap->link_time = ap->cur_time;
2146                 ap->flags &= ~(MR_NP_LOADED);
2147                 ap->txconfig = 0;
2148                 tw32(MAC_TX_AUTO_NEG, 0);
2149                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150                 tw32_f(MAC_MODE, tp->mac_mode);
2151                 udelay(40);
2152
2153                 ret = ANEG_TIMER_ENAB;
2154                 ap->state = ANEG_STATE_RESTART;
2155
2156                 /* fallthru */
2157         case ANEG_STATE_RESTART:
2158                 delta = ap->cur_time - ap->link_time;
2159                 if (delta > ANEG_STATE_SETTLE_TIME) {
2160                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2161                 } else {
2162                         ret = ANEG_TIMER_ENAB;
2163                 }
2164                 break;
2165
2166         case ANEG_STATE_DISABLE_LINK_OK:
2167                 ret = ANEG_DONE;
2168                 break;
2169
2170         case ANEG_STATE_ABILITY_DETECT_INIT:
2171                 ap->flags &= ~(MR_TOGGLE_TX);
2172                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2173                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2174                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2175                 tw32_f(MAC_MODE, tp->mac_mode);
2176                 udelay(40);
2177
2178                 ap->state = ANEG_STATE_ABILITY_DETECT;
2179                 break;
2180
2181         case ANEG_STATE_ABILITY_DETECT:
2182                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2183                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2184                 }
2185                 break;
2186
2187         case ANEG_STATE_ACK_DETECT_INIT:
2188                 ap->txconfig |= ANEG_CFG_ACK;
2189                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2190                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2191                 tw32_f(MAC_MODE, tp->mac_mode);
2192                 udelay(40);
2193
2194                 ap->state = ANEG_STATE_ACK_DETECT;
2195
2196                 /* fallthru */
2197         case ANEG_STATE_ACK_DETECT:
2198                 if (ap->ack_match != 0) {
2199                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2200                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2201                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2202                         } else {
2203                                 ap->state = ANEG_STATE_AN_ENABLE;
2204                         }
2205                 } else if (ap->ability_match != 0 &&
2206                            ap->rxconfig == 0) {
2207                         ap->state = ANEG_STATE_AN_ENABLE;
2208                 }
2209                 break;
2210
2211         case ANEG_STATE_COMPLETE_ACK_INIT:
2212                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2213                         ret = ANEG_FAILED;
2214                         break;
2215                 }
2216                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2217                                MR_LP_ADV_HALF_DUPLEX |
2218                                MR_LP_ADV_SYM_PAUSE |
2219                                MR_LP_ADV_ASYM_PAUSE |
2220                                MR_LP_ADV_REMOTE_FAULT1 |
2221                                MR_LP_ADV_REMOTE_FAULT2 |
2222                                MR_LP_ADV_NEXT_PAGE |
2223                                MR_TOGGLE_RX |
2224                                MR_NP_RX);
2225                 if (ap->rxconfig & ANEG_CFG_FD)
2226                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2227                 if (ap->rxconfig & ANEG_CFG_HD)
2228                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2229                 if (ap->rxconfig & ANEG_CFG_PS1)
2230                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2231                 if (ap->rxconfig & ANEG_CFG_PS2)
2232                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2233                 if (ap->rxconfig & ANEG_CFG_RF1)
2234                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2235                 if (ap->rxconfig & ANEG_CFG_RF2)
2236                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2237                 if (ap->rxconfig & ANEG_CFG_NP)
2238                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2239
2240                 ap->link_time = ap->cur_time;
2241
2242                 ap->flags ^= (MR_TOGGLE_TX);
2243                 if (ap->rxconfig & 0x0008)
2244                         ap->flags |= MR_TOGGLE_RX;
2245                 if (ap->rxconfig & ANEG_CFG_NP)
2246                         ap->flags |= MR_NP_RX;
2247                 ap->flags |= MR_PAGE_RX;
2248
2249                 ap->state = ANEG_STATE_COMPLETE_ACK;
2250                 ret = ANEG_TIMER_ENAB;
2251                 break;
2252
2253         case ANEG_STATE_COMPLETE_ACK:
2254                 if (ap->ability_match != 0 &&
2255                     ap->rxconfig == 0) {
2256                         ap->state = ANEG_STATE_AN_ENABLE;
2257                         break;
2258                 }
2259                 delta = ap->cur_time - ap->link_time;
2260                 if (delta > ANEG_STATE_SETTLE_TIME) {
2261                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2262                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2263                         } else {
2264                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2265                                     !(ap->flags & MR_NP_RX)) {
2266                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2267                                 } else {
2268                                         ret = ANEG_FAILED;
2269                                 }
2270                         }
2271                 }
2272                 break;
2273
2274         case ANEG_STATE_IDLE_DETECT_INIT:
2275                 ap->link_time = ap->cur_time;
2276                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2277                 tw32_f(MAC_MODE, tp->mac_mode);
2278                 udelay(40);
2279
2280                 ap->state = ANEG_STATE_IDLE_DETECT;
2281                 ret = ANEG_TIMER_ENAB;
2282                 break;
2283
2284         case ANEG_STATE_IDLE_DETECT:
2285                 if (ap->ability_match != 0 &&
2286                     ap->rxconfig == 0) {
2287                         ap->state = ANEG_STATE_AN_ENABLE;
2288                         break;
2289                 }
2290                 delta = ap->cur_time - ap->link_time;
2291                 if (delta > ANEG_STATE_SETTLE_TIME) {
2292                         /* XXX another gem from the Broadcom driver :( */
2293                         ap->state = ANEG_STATE_LINK_OK;
2294                 }
2295                 break;
2296
2297         case ANEG_STATE_LINK_OK:
2298                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2299                 ret = ANEG_DONE;
2300                 break;
2301
2302         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2303                 /* ??? unimplemented */
2304                 break;
2305
2306         case ANEG_STATE_NEXT_PAGE_WAIT:
2307                 /* ??? unimplemented */
2308                 break;
2309
2310         default:
2311                 ret = ANEG_FAILED;
2312                 break;
2313         }
2314
2315         return ret;
2316 }
2317
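/* Run the software fiber autoneg state machine to completion (roughly
 * 195 ms worst case).  Returns 1 on success and fills *flags with the
 * MR_* result bits.
 */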
2318 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2319 {
2320         int res = 0;
2321         struct tg3_fiber_aneginfo aninfo;
2322         int status = ANEG_FAILED;
2323         unsigned int tick;
2324         u32 tmp;
2325
2326         tw32_f(MAC_TX_AUTO_NEG, 0);
2327
2328         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2329         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2330         udelay(40);
2331
2332         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2333         udelay(40);
2334
2335         memset(&aninfo, 0, sizeof(aninfo));
2336         aninfo.flags |= MR_AN_ENABLE;
2337         aninfo.state = ANEG_STATE_UNKNOWN;
2338         aninfo.cur_time = 0;
2339         tick = 0;
2340         while (++tick < 195000) {
2341                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2342                 if (status == ANEG_DONE || status == ANEG_FAILED)
2343                         break;
2344
2345                 udelay(1);
2346         }
2347
2348         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2349         tw32_f(MAC_MODE, tp->mac_mode);
2350         udelay(40);
2351
2352         *flags = aninfo.flags;
2353
2354         if (status == ANEG_DONE &&
2355             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2356                              MR_LP_ADV_FULL_DUPLEX)))
2357                 res = 1;
2358
2359         return res;
2360 }
2361
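/* Initialization/reset sequence for the BCM8002 SERDES PHY. */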
2362 static void tg3_init_bcm8002(struct tg3 *tp)
2363 {
2364         u32 mac_status = tr32(MAC_STATUS);
2365         int i;
2366
2367         /* Reset when initting for the first time or when we have a link. */
2368         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2369             !(mac_status & MAC_STATUS_PCS_SYNCED))
2370                 return;
2371
2372         /* Set PLL lock range. */
2373         tg3_writephy(tp, 0x16, 0x8007);
2374
2375         /* SW reset */
2376         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2377
2378         /* Wait for reset to complete. */
2379         /* XXX schedule_timeout() ... */
2380         for (i = 0; i < 500; i++)
2381                 udelay(10);
2382
2383         /* Config mode; select PMA/Ch 1 regs. */
2384         tg3_writephy(tp, 0x10, 0x8411);
2385
2386         /* Enable auto-lock and comdet, select txclk for tx. */
2387         tg3_writephy(tp, 0x11, 0x0a10);
2388
2389         tg3_writephy(tp, 0x18, 0x00a0);
2390         tg3_writephy(tp, 0x16, 0x41ff);
2391
2392         /* Assert and deassert POR. */
2393         tg3_writephy(tp, 0x13, 0x0400);
2394         udelay(40);
2395         tg3_writephy(tp, 0x13, 0x0000);
2396
2397         tg3_writephy(tp, 0x11, 0x0a50);
2398         udelay(40);
2399         tg3_writephy(tp, 0x11, 0x0a10);
2400
2401         /* Wait for signal to stabilize */
2402         /* XXX schedule_timeout() ... */
2403         for (i = 0; i < 15000; i++)
2404                 udelay(10);
2405
2406         /* Deselect the channel register so we can read the PHYID
2407          * later.
2408          */
2409         tg3_writephy(tp, 0x10, 0x8011);
2410 }
2411
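/* Fiber link setup using the chip's hardware (SG_DIG) autoneg engine.
 * Returns nonzero when the link is up.
 */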
2412 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2413 {
2414         u32 sg_dig_ctrl, sg_dig_status;
2415         u32 serdes_cfg, expected_sg_dig_ctrl;
2416         int workaround, port_a;
2417         int current_link_up;
2418
2419         serdes_cfg = 0;
2420         expected_sg_dig_ctrl = 0;
2421         workaround = 0;
2422         port_a = 1;
2423         current_link_up = 0;
2424
2425         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2426             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2427                 workaround = 1;
2428                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2429                         port_a = 0;
2430
2431                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2432                 /* preserve bits 20-23 for voltage regulator */
2433                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2434         }
2435
2436         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2437
2438         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2439                 if (sg_dig_ctrl & (1 << 31)) {
2440                         if (workaround) {
2441                                 u32 val = serdes_cfg;
2442
2443                                 if (port_a)
2444                                         val |= 0xc010000;
2445                                 else
2446                                         val |= 0x4010000;
2447                                 tw32_f(MAC_SERDES_CFG, val);
2448                         }
2449                         tw32_f(SG_DIG_CTRL, 0x01388400);
2450                 }
2451                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2452                         tg3_setup_flow_control(tp, 0, 0);
2453                         current_link_up = 1;
2454                 }
2455                 goto out;
2456         }
2457
2458         /* Want auto-negotiation.  */
2459         expected_sg_dig_ctrl = 0x81388400;
2460
2461         /* Pause capability */
2462         expected_sg_dig_ctrl |= (1 << 11);
2463
2464         /* Asymmetric pause */
2465         expected_sg_dig_ctrl |= (1 << 12);
2466
2467         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2468                 if (workaround)
2469                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2470                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2471                 udelay(5);
2472                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2473
2474                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2475         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2476                                  MAC_STATUS_SIGNAL_DET)) {
2477                 int i;
2478
2479                 /* Give time to negotiate (~200ms) */
2480                 for (i = 0; i < 40000; i++) {
2481                         sg_dig_status = tr32(SG_DIG_STATUS);
2482                         if (sg_dig_status & (0x3))
2483                                 break;
2484                         udelay(5);
2485                 }
2486                 mac_status = tr32(MAC_STATUS);
2487
2488                 if ((sg_dig_status & (1 << 1)) &&
2489                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2490                         u32 local_adv, remote_adv;
2491
2492                         local_adv = ADVERTISE_PAUSE_CAP;
2493                         remote_adv = 0;
2494                         if (sg_dig_status & (1 << 19))
2495                                 remote_adv |= LPA_PAUSE_CAP;
2496                         if (sg_dig_status & (1 << 20))
2497                                 remote_adv |= LPA_PAUSE_ASYM;
2498
2499                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2500                         current_link_up = 1;
2501                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2502                 } else if (!(sg_dig_status & (1 << 1))) {
2503                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2504                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2505                         else {
2506                                 if (workaround) {
2507                                         u32 val = serdes_cfg;
2508
2509                                         if (port_a)
2510                                                 val |= 0xc010000;
2511                                         else
2512                                                 val |= 0x4010000;
2513
2514                                         tw32_f(MAC_SERDES_CFG, val);
2515                                 }
2516
2517                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2518                                 udelay(40);
2519
2520                                 /* Link parallel detection - link is up
2521                                  * only if we have PCS_SYNC and not
2522                                  * receiving config code words */
2523                                 mac_status = tr32(MAC_STATUS);
2524                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2525                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2526                                         tg3_setup_flow_control(tp, 0, 0);
2527                                         current_link_up = 1;
2528                                 }
2529                         }
2530                 }
2531         }
2532
2533 out:
2534         return current_link_up;
2535 }
2536
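/* Fiber link setup without hardware autoneg: run the software autoneg
 * state machine, or force a 1000 Mb/s full duplex link when autoneg is
 * disabled.  Returns nonzero when the link is up.
 */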
2537 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2538 {
2539         int current_link_up = 0;
2540
2541         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2542                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2543                 goto out;
2544         }
2545
2546         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2547                 u32 flags;
2548                 int i;
2549
2550                 if (fiber_autoneg(tp, &flags)) {
2551                         u32 local_adv, remote_adv;
2552
2553                         local_adv = ADVERTISE_PAUSE_CAP;
2554                         remote_adv = 0;
2555                         if (flags & MR_LP_ADV_SYM_PAUSE)
2556                                 remote_adv |= LPA_PAUSE_CAP;
2557                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2558                                 remote_adv |= LPA_PAUSE_ASYM;
2559
2560                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2561
2562                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2563                         current_link_up = 1;
2564                 }
2565                 for (i = 0; i < 30; i++) {
2566                         udelay(20);
2567                         tw32_f(MAC_STATUS,
2568                                (MAC_STATUS_SYNC_CHANGED |
2569                                 MAC_STATUS_CFG_CHANGED));
2570                         udelay(40);
2571                         if ((tr32(MAC_STATUS) &
2572                              (MAC_STATUS_SYNC_CHANGED |
2573                               MAC_STATUS_CFG_CHANGED)) == 0)
2574                                 break;
2575                 }
2576
2577                 mac_status = tr32(MAC_STATUS);
2578                 if (current_link_up == 0 &&
2579                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2580                     !(mac_status & MAC_STATUS_RCVD_CFG))
2581                         current_link_up = 1;
2582         } else {
2583                 /* Forcing 1000FD link up. */
2584                 current_link_up = 1;
2585                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2586
2587                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2588                 udelay(40);
2589         }
2590
2591 out:
2592         return current_link_up;
2593 }
2594
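/* Top-level link setup for TBI/fiber devices: select hardware or
 * software autoneg, program MAC_MODE and the link LED, and report any
 * link change.
 */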
2595 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2596 {
2597         u32 orig_pause_cfg;
2598         u16 orig_active_speed;
2599         u8 orig_active_duplex;
2600         u32 mac_status;
2601         int current_link_up;
2602         int i;
2603
2604         orig_pause_cfg =
2605                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2606                                   TG3_FLAG_TX_PAUSE));
2607         orig_active_speed = tp->link_config.active_speed;
2608         orig_active_duplex = tp->link_config.active_duplex;
2609
2610         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2611             netif_carrier_ok(tp->dev) &&
2612             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2613                 mac_status = tr32(MAC_STATUS);
2614                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2615                                MAC_STATUS_SIGNAL_DET |
2616                                MAC_STATUS_CFG_CHANGED |
2617                                MAC_STATUS_RCVD_CFG);
2618                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2619                                    MAC_STATUS_SIGNAL_DET)) {
2620                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2621                                             MAC_STATUS_CFG_CHANGED));
2622                         return 0;
2623                 }
2624         }
2625
2626         tw32_f(MAC_TX_AUTO_NEG, 0);
2627
2628         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2629         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2630         tw32_f(MAC_MODE, tp->mac_mode);
2631         udelay(40);
2632
2633         if (tp->phy_id == PHY_ID_BCM8002)
2634                 tg3_init_bcm8002(tp);
2635
2636         /* Enable link change event even when serdes polling.  */
2637         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2638         udelay(40);
2639
2640         current_link_up = 0;
2641         mac_status = tr32(MAC_STATUS);
2642
2643         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2644                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2645         else
2646                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2647
2648         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2649         tw32_f(MAC_MODE, tp->mac_mode);
2650         udelay(40);
2651
2652         tp->hw_status->status =
2653                 (SD_STATUS_UPDATED |
2654                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2655
2656         for (i = 0; i < 100; i++) {
2657                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2658                                     MAC_STATUS_CFG_CHANGED));
2659                 udelay(5);
2660                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2661                                          MAC_STATUS_CFG_CHANGED)) == 0)
2662                         break;
2663         }
2664
2665         mac_status = tr32(MAC_STATUS);
2666         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2667                 current_link_up = 0;
2668                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2669                         tw32_f(MAC_MODE, (tp->mac_mode |
2670                                           MAC_MODE_SEND_CONFIGS));
2671                         udelay(1);
2672                         tw32_f(MAC_MODE, tp->mac_mode);
2673                 }
2674         }
2675
2676         if (current_link_up == 1) {
2677                 tp->link_config.active_speed = SPEED_1000;
2678                 tp->link_config.active_duplex = DUPLEX_FULL;
2679                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2680                                     LED_CTRL_LNKLED_OVERRIDE |
2681                                     LED_CTRL_1000MBPS_ON));
2682         } else {
2683                 tp->link_config.active_speed = SPEED_INVALID;
2684                 tp->link_config.active_duplex = DUPLEX_INVALID;
2685                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2686                                     LED_CTRL_LNKLED_OVERRIDE |
2687                                     LED_CTRL_TRAFFIC_OVERRIDE));
2688         }
2689
2690         if (current_link_up != netif_carrier_ok(tp->dev)) {
2691                 if (current_link_up)
2692                         netif_carrier_on(tp->dev);
2693                 else
2694                         netif_carrier_off(tp->dev);
2695                 tg3_link_report(tp);
2696         } else {
2697                 u32 now_pause_cfg =
2698                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2699                                          TG3_FLAG_TX_PAUSE);
2700                 if (orig_pause_cfg != now_pause_cfg ||
2701                     orig_active_speed != tp->link_config.active_speed ||
2702                     orig_active_duplex != tp->link_config.active_duplex)
2703                         tg3_link_report(tp);
2704         }
2705
2706         return 0;
2707 }
2708
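/* Link setup for fiber devices whose SERDES is programmed through
 * standard MII registers rather than the SG_DIG block.
 */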
2709 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2710 {
2711         int current_link_up, err = 0;
2712         u32 bmsr, bmcr;
2713         u16 current_speed;
2714         u8 current_duplex;
2715
2716         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2717         tw32_f(MAC_MODE, tp->mac_mode);
2718         udelay(40);
2719
2720         tw32(MAC_EVENT, 0);
2721
2722         tw32_f(MAC_STATUS,
2723              (MAC_STATUS_SYNC_CHANGED |
2724               MAC_STATUS_CFG_CHANGED |
2725               MAC_STATUS_MI_COMPLETION |
2726               MAC_STATUS_LNKSTATE_CHANGED));
2727         udelay(40);
2728
2729         if (force_reset)
2730                 tg3_phy_reset(tp);
2731
2732         current_link_up = 0;
2733         current_speed = SPEED_INVALID;
2734         current_duplex = DUPLEX_INVALID;
2735
2736         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2737         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2738         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2739                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2740                         bmsr |= BMSR_LSTATUS;
2741                 else
2742                         bmsr &= ~BMSR_LSTATUS;
2743         }
2744
2745         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2746
2747         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2748             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2749                 /* do nothing, just check for link up at the end */
2750         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2751                 u32 adv, new_adv;
2752
2753                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2754                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2755                                   ADVERTISE_1000XPAUSE |
2756                                   ADVERTISE_1000XPSE_ASYM |
2757                                   ADVERTISE_SLCT);
2758
2759                 /* Always advertise symmetric PAUSE just like copper */
2760                 new_adv |= ADVERTISE_1000XPAUSE;
2761
2762                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2763                         new_adv |= ADVERTISE_1000XHALF;
2764                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2765                         new_adv |= ADVERTISE_1000XFULL;
2766
2767                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2768                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2769                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2770                         tg3_writephy(tp, MII_BMCR, bmcr);
2771
2772                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2773                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2774                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2775
2776                         return err;
2777                 }
2778         } else {
2779                 u32 new_bmcr;
2780
2781                 bmcr &= ~BMCR_SPEED1000;
2782                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2783
2784                 if (tp->link_config.duplex == DUPLEX_FULL)
2785                         new_bmcr |= BMCR_FULLDPLX;
2786
2787                 if (new_bmcr != bmcr) {
2788                         /* BMCR_SPEED1000 is a reserved bit that needs
2789                          * to be set on write.
2790                          */
2791                         new_bmcr |= BMCR_SPEED1000;
2792
2793                         /* Force a linkdown */
2794                         if (netif_carrier_ok(tp->dev)) {
2795                                 u32 adv;
2796
2797                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2798                                 adv &= ~(ADVERTISE_1000XFULL |
2799                                          ADVERTISE_1000XHALF |
2800                                          ADVERTISE_SLCT);
2801                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2802                                 tg3_writephy(tp, MII_BMCR, bmcr |
2803                                                            BMCR_ANRESTART |
2804                                                            BMCR_ANENABLE);
2805                                 udelay(10);
2806                                 netif_carrier_off(tp->dev);
2807                         }
2808                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2809                         bmcr = new_bmcr;
2810                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2812                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2813                             ASIC_REV_5714) {
2814                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2815                                         bmsr |= BMSR_LSTATUS;
2816                                 else
2817                                         bmsr &= ~BMSR_LSTATUS;
2818                         }
2819                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2820                 }
2821         }
2822
2823         if (bmsr & BMSR_LSTATUS) {
2824                 current_speed = SPEED_1000;
2825                 current_link_up = 1;
2826                 if (bmcr & BMCR_FULLDPLX)
2827                         current_duplex = DUPLEX_FULL;
2828                 else
2829                         current_duplex = DUPLEX_HALF;
2830
2831                 if (bmcr & BMCR_ANENABLE) {
2832                         u32 local_adv, remote_adv, common;
2833
2834                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2835                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2836                         common = local_adv & remote_adv;
2837                         if (common & (ADVERTISE_1000XHALF |
2838                                       ADVERTISE_1000XFULL)) {
2839                                 if (common & ADVERTISE_1000XFULL)
2840                                         current_duplex = DUPLEX_FULL;
2841                                 else
2842                                         current_duplex = DUPLEX_HALF;
2843
2844                                 tg3_setup_flow_control(tp, local_adv,
2845                                                        remote_adv);
2846                         } else
2848                                 current_link_up = 0;
2849                 }
2850         }
2851
2852         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2853         if (tp->link_config.active_duplex == DUPLEX_HALF)
2854                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2855
2856         tw32_f(MAC_MODE, tp->mac_mode);
2857         udelay(40);
2858
2859         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2860
2861         tp->link_config.active_speed = current_speed;
2862         tp->link_config.active_duplex = current_duplex;
2863
2864         if (current_link_up != netif_carrier_ok(tp->dev)) {
2865                 if (current_link_up)
2866                         netif_carrier_on(tp->dev);
2867                 else {
2868                         netif_carrier_off(tp->dev);
2869                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2870                 }
2871                 tg3_link_report(tp);
2872         }
2873         return err;
2874 }
2875
2876 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2877 {
2878         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2879                 /* Give autoneg time to complete. */
2880                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2881                 return;
2882         }
2883         if (!netif_carrier_ok(tp->dev) &&
2884             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2885                 u32 bmcr;
2886
2887                 tg3_readphy(tp, MII_BMCR, &bmcr);
2888                 if (bmcr & BMCR_ANENABLE) {
2889                         u32 phy1, phy2;
2890
2891                         /* Select shadow register 0x1f */
2892                         tg3_writephy(tp, 0x1c, 0x7c00);
2893                         tg3_readphy(tp, 0x1c, &phy1);
2894
2895                         /* Select expansion interrupt status register */
2896                         tg3_writephy(tp, 0x17, 0x0f01);
2897                         tg3_readphy(tp, 0x15, &phy2);
2898                         tg3_readphy(tp, 0x15, &phy2);
2899
2900                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2901                                 /* We have signal detect and not receiving
2902                                  * config code words, link is up by parallel
2903                                  * detection.
2904                                  */
2905
2906                                 bmcr &= ~BMCR_ANENABLE;
2907                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2908                                 tg3_writephy(tp, MII_BMCR, bmcr);
2909                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2910                         }
2911                 }
2912         } else if (netif_carrier_ok(tp->dev) &&
2914                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2915                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2916                 u32 phy2;
2917
2918                 /* Select expansion interrupt status register */
2919                 tg3_writephy(tp, 0x17, 0x0f01);
2920                 tg3_readphy(tp, 0x15, &phy2);
2921                 if (phy2 & 0x20) {
2922                         u32 bmcr;
2923
2924                         /* Config code words received, turn on autoneg. */
2925                         tg3_readphy(tp, MII_BMCR, &bmcr);
2926                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2927
2928                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2929
2930                 }
2931         }
2932 }
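/* The raw register numbers above are Broadcom-specific PHY registers:
 * 0x1c appears to be the auxiliary control/shadow register (0x7c00 selects
 * shadow 0x1f), and 0x17/0x15 appear to be the expansion register address
 * and data ports; the data port is read twice above, presumably so that
 * latched status bits are cleared before the value is used.  Treat these
 * identities as informed guesses rather than datasheet fact.
 */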
2933
2934 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2935 {
2936         int err;
2937
2938         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2939                 err = tg3_setup_fiber_phy(tp, force_reset);
2940         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2941                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2942         } else {
2943                 err = tg3_setup_copper_phy(tp, force_reset);
2944         }
2945
2946         if (tp->link_config.active_speed == SPEED_1000 &&
2947             tp->link_config.active_duplex == DUPLEX_HALF)
2948                 tw32(MAC_TX_LENGTHS,
2949                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2950                       (6 << TX_LENGTHS_IPG_SHIFT) |
2951                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2952         else
2953                 tw32(MAC_TX_LENGTHS,
2954                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2955                       (6 << TX_LENGTHS_IPG_SHIFT) |
2956                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2957
2958         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2959                 if (netif_carrier_ok(tp->dev)) {
2960                         tw32(HOSTCC_STAT_COAL_TICKS,
2961                              tp->coal.stats_block_coalesce_usecs);
2962                 } else {
2963                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2964                 }
2965         }
2966
2967         return err;
2968 }
2969
2970 /* This is called whenever we suspect that the system chipset is re-
2971  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2972  * is bogus tx completions. We try to recover by setting the
2973  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2974  * in the workqueue.
2975  */
2976 static void tg3_tx_recover(struct tg3 *tp)
2977 {
2978         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2979                tp->write32_tx_mbox == tg3_write_indirect_mbox);
2980
2981         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2982                "mapped I/O cycles to the network device, attempting to "
2983                "recover. Please report the problem to the driver maintainer "
2984                "and include system chipset information.\n", tp->dev->name);
2985
2986         spin_lock(&tp->lock);
2987         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2988         spin_unlock(&tp->lock);
2989 }
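/* TG3_FLAG_TX_RECOVERY_PENDING is consumed by tg3_reset_task() below, which
 * switches to the tg3_write32_tx_mbox / tg3_write_flush_reg32 mailbox
 * methods, sets TG3_FLAG_MBOX_WRITE_REORDER, and then resets the chip.
 */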
2990
2991 /* Tigon3 never reports partial packet sends.  So we do not
2992  * need special logic to handle SKBs that have not had all
2993  * of their frags sent yet, like SunGEM does.
2994  */
2995 static void tg3_tx(struct tg3 *tp)
2996 {
2997         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2998         u32 sw_idx = tp->tx_cons;
2999
3000         while (sw_idx != hw_idx) {
3001                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3002                 struct sk_buff *skb = ri->skb;
3003                 int i, tx_bug = 0;
3004
3005                 if (unlikely(skb == NULL)) {
3006                         tg3_tx_recover(tp);
3007                         return;
3008                 }
3009
3010                 pci_unmap_single(tp->pdev,
3011                                  pci_unmap_addr(ri, mapping),
3012                                  skb_headlen(skb),
3013                                  PCI_DMA_TODEVICE);
3014
3015                 ri->skb = NULL;
3016
3017                 sw_idx = NEXT_TX(sw_idx);
3018
3019                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3020                         ri = &tp->tx_buffers[sw_idx];
3021                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3022                                 tx_bug = 1;
3023
3024                         pci_unmap_page(tp->pdev,
3025                                        pci_unmap_addr(ri, mapping),
3026                                        skb_shinfo(skb)->frags[i].size,
3027                                        PCI_DMA_TODEVICE);
3028
3029                         sw_idx = NEXT_TX(sw_idx);
3030                 }
3031
3032                 dev_kfree_skb(skb);
3033
3034                 if (unlikely(tx_bug)) {
3035                         tg3_tx_recover(tp);
3036                         return;
3037                 }
3038         }
3039
3040         tp->tx_cons = sw_idx;
3041
3042         if (unlikely(netif_queue_stopped(tp->dev))) {
3043                 spin_lock(&tp->tx_lock);
3044                 if (netif_queue_stopped(tp->dev) &&
3045                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3046                         netif_wake_queue(tp->dev);
3047                 spin_unlock(&tp->tx_lock);
3048         }
3049 }
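/* The wake-up above is checked twice on purpose: a lockless test keeps
 * tx_lock out of the common completion path, and the re-test under tx_lock
 * (the lock held by the xmit path when it stops the queue) makes the
 * stop/wake decision against a consistent view of the ring.
 */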
3050
3051 /* Returns size of skb allocated or < 0 on error.
3052  *
3053  * We only need to fill in the address because the other members
3054  * of the RX descriptor are invariant, see tg3_init_rings.
3055  *
3056  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3057  * posting buffers we only dirty the first cache line of the RX
3058  * descriptor (containing the address).  Whereas for the RX status
3059  * buffers the cpu only reads the last cacheline of the RX descriptor
3060  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3061  */
3062 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3063                             int src_idx, u32 dest_idx_unmasked)
3064 {
3065         struct tg3_rx_buffer_desc *desc;
3066         struct ring_info *map, *src_map;
3067         struct sk_buff *skb;
3068         dma_addr_t mapping;
3069         int skb_size, dest_idx;
3070
3071         src_map = NULL;
3072         switch (opaque_key) {
3073         case RXD_OPAQUE_RING_STD:
3074                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3075                 desc = &tp->rx_std[dest_idx];
3076                 map = &tp->rx_std_buffers[dest_idx];
3077                 if (src_idx >= 0)
3078                         src_map = &tp->rx_std_buffers[src_idx];
3079                 skb_size = tp->rx_pkt_buf_sz;
3080                 break;
3081
3082         case RXD_OPAQUE_RING_JUMBO:
3083                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3084                 desc = &tp->rx_jumbo[dest_idx];
3085                 map = &tp->rx_jumbo_buffers[dest_idx];
3086                 if (src_idx >= 0)
3087                         src_map = &tp->rx_jumbo_buffers[src_idx];
3088                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3089                 break;
3090
3091         default:
3092                 return -EINVAL;
3093         }
3094
3095         /* Do not overwrite any of the map or rp information
3096          * until we are sure we can commit to a new buffer.
3097          *
3098          * Callers depend upon this behavior and assume that
3099          * we leave everything unchanged if we fail.
3100          */
3101         skb = dev_alloc_skb(skb_size);
3102         if (skb == NULL)
3103                 return -ENOMEM;
3104
3105         skb->dev = tp->dev;
3106         skb_reserve(skb, tp->rx_offset);
3107
3108         mapping = pci_map_single(tp->pdev, skb->data,
3109                                  skb_size - tp->rx_offset,
3110                                  PCI_DMA_FROMDEVICE);
3111
3112         map->skb = skb;
3113         pci_unmap_addr_set(map, mapping, mapping);
3114
3115         if (src_map != NULL)
3116                 src_map->skb = NULL;
3117
3118         desc->addr_hi = ((u64)mapping >> 32);
3119         desc->addr_lo = ((u64)mapping & 0xffffffff);
3120
3121         return skb_size;
3122 }
3123
3124 /* We only need to copy over the address because the other
3125  * members of the RX descriptor are invariant.  See notes above
3126  * tg3_alloc_rx_skb for full details.
3127  */
3128 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3129                            int src_idx, u32 dest_idx_unmasked)
3130 {
3131         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3132         struct ring_info *src_map, *dest_map;
3133         int dest_idx;
3134
3135         switch (opaque_key) {
3136         case RXD_OPAQUE_RING_STD:
3137                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3138                 dest_desc = &tp->rx_std[dest_idx];
3139                 dest_map = &tp->rx_std_buffers[dest_idx];
3140                 src_desc = &tp->rx_std[src_idx];
3141                 src_map = &tp->rx_std_buffers[src_idx];
3142                 break;
3143
3144         case RXD_OPAQUE_RING_JUMBO:
3145                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3146                 dest_desc = &tp->rx_jumbo[dest_idx];
3147                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3148                 src_desc = &tp->rx_jumbo[src_idx];
3149                 src_map = &tp->rx_jumbo_buffers[src_idx];
3150                 break;
3151
3152         default:
3153                 return;
3154         }
3155
3156         dest_map->skb = src_map->skb;
3157         pci_unmap_addr_set(dest_map, mapping,
3158                            pci_unmap_addr(src_map, mapping));
3159         dest_desc->addr_hi = src_desc->addr_hi;
3160         dest_desc->addr_lo = src_desc->addr_lo;
3161
3162         src_map->skb = NULL;
3163 }
3164
3165 #if TG3_VLAN_TAG_USED
3166 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3167 {
3168         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3169 }
3170 #endif
3171
3172 /* The RX ring scheme is composed of multiple rings which post fresh
3173  * buffers to the chip, and one special ring the chip uses to report
3174  * status back to the host.
3175  *
3176  * The special ring reports the status of received packets to the
3177  * host.  The chip does not write into the original descriptor the
3178  * RX buffer was obtained from.  The chip simply takes the original
3179  * descriptor as provided by the host, updates the status and length
3180  * field, then writes this into the next status ring entry.
3181  *
3182  * Each ring the host uses to post buffers to the chip is described
3183  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3184  * it is first placed into the on-chip ram.  When the packet's length
3185  * is known, it walks down the TG3_BDINFO entries to select the ring.
3186  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3187  * which is within the range of the new packet's length is chosen.
3188  *
3189  * The "separate ring for rx status" scheme may sound queer, but it makes
3190  * sense from a cache coherency perspective.  If only the host writes
3191  * to the buffer post rings, and only the chip writes to the rx status
3192  * rings, then cache lines never move beyond shared-modified state.
3193  * If both the host and chip were to write into the same ring, cache line
3194  * eviction could occur since both entities want it in an exclusive state.
3195  */
3196 static int tg3_rx(struct tg3 *tp, int budget)
3197 {
3198         u32 work_mask;
3199         u32 sw_idx = tp->rx_rcb_ptr;
3200         u16 hw_idx;
3201         int received;
3202
3203         hw_idx = tp->hw_status->idx[0].rx_producer;
3204         /*
3205          * We need to order the read of hw_idx and the read of
3206          * the opaque cookie.
3207          */
3208         rmb();
3209         work_mask = 0;
3210         received = 0;
3211         while (sw_idx != hw_idx && budget > 0) {
3212                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3213                 unsigned int len;
3214                 struct sk_buff *skb;
3215                 dma_addr_t dma_addr;
3216                 u32 opaque_key, desc_idx, *post_ptr;
3217
3218                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3219                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3220                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3221                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3222                                                   mapping);
3223                         skb = tp->rx_std_buffers[desc_idx].skb;
3224                         post_ptr = &tp->rx_std_ptr;
3225                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3226                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3227                                                   mapping);
3228                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3229                         post_ptr = &tp->rx_jumbo_ptr;
3230                 } else {
3232                         goto next_pkt_nopost;
3233                 }
3234
3235                 work_mask |= opaque_key;
3236
3237                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3238                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3239                 drop_it:
3240                         tg3_recycle_rx(tp, opaque_key,
3241                                        desc_idx, *post_ptr);
3242                 drop_it_no_recycle:
3243                         /* Other statistics kept track of by card. */
3244                         tp->net_stats.rx_dropped++;
3245                         goto next_pkt;
3246                 }
3247
3248                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3249
3250                 if (len > RX_COPY_THRESHOLD &&
3251                         tp->rx_offset == 2
3252                         /* rx_offset != 2 iff this is a 5701 card running
3253                          * in PCI-X mode [see tg3_get_invariants()] */
3254                 ) {
3255                         int skb_size;
3256
3257                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3258                                                     desc_idx, *post_ptr);
3259                         if (skb_size < 0)
3260                                 goto drop_it;
3261
3262                         pci_unmap_single(tp->pdev, dma_addr,
3263                                          skb_size - tp->rx_offset,
3264                                          PCI_DMA_FROMDEVICE);
3265
3266                         skb_put(skb, len);
3267                 } else {
3268                         struct sk_buff *copy_skb;
3269
3270                         tg3_recycle_rx(tp, opaque_key,
3271                                        desc_idx, *post_ptr);
3272
3273                         copy_skb = dev_alloc_skb(len + 2);
3274                         if (copy_skb == NULL)
3275                                 goto drop_it_no_recycle;
3276
3277                         copy_skb->dev = tp->dev;
3278                         skb_reserve(copy_skb, 2);
3279                         skb_put(copy_skb, len);
3280                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3281                         memcpy(copy_skb->data, skb->data, len);
3282                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3283
3284                         /* We'll reuse the original ring buffer. */
3285                         skb = copy_skb;
3286                 }
3287
3288                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3289                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3290                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3291                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3292                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3293                 else
3294                         skb->ip_summed = CHECKSUM_NONE;
3295
3296                 skb->protocol = eth_type_trans(skb, tp->dev);
3297 #if TG3_VLAN_TAG_USED
3298                 if (tp->vlgrp != NULL &&
3299                     desc->type_flags & RXD_FLAG_VLAN) {
3300                         tg3_vlan_rx(tp, skb,
3301                                     desc->err_vlan & RXD_VLAN_MASK);
3302                 } else
3303 #endif
3304                         netif_receive_skb(skb);
3305
3306                 tp->dev->last_rx = jiffies;
3307                 received++;
3308                 budget--;
3309
3310 next_pkt:
3311                 (*post_ptr)++;
3312 next_pkt_nopost:
3313                 sw_idx++;
3314                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3315
3316                 /* Refresh hw_idx to see if there is new work */
3317                 if (sw_idx == hw_idx) {
3318                         hw_idx = tp->hw_status->idx[0].rx_producer;
3319                         rmb();
3320                 }
3321         }
3322
3323         /* ACK the status ring. */
3324         tp->rx_rcb_ptr = sw_idx;
3325         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3326
3327         /* Refill RX ring(s). */
3328         if (work_mask & RXD_OPAQUE_RING_STD) {
3329                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3330                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3331                              sw_idx);
3332         }
3333         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3334                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3335                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3336                              sw_idx);
3337         }
3338         mmiowb();
3339
3340         return received;
3341 }
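/* Note that only the producer mailboxes of rings we actually consumed from
 * (recorded in work_mask) are advanced, so a pass that touched only the
 * standard ring costs no jumbo-ring MMIO write, and vice versa.
 */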
3342
3343 static int tg3_poll(struct net_device *netdev, int *budget)
3344 {
3345         struct tg3 *tp = netdev_priv(netdev);
3346         struct tg3_hw_status *sblk = tp->hw_status;
3347         int done;
3348
3349         /* handle link change and other phy events */
3350         if (!(tp->tg3_flags &
3351               (TG3_FLAG_USE_LINKCHG_REG |
3352                TG3_FLAG_POLL_SERDES))) {
3353                 if (sblk->status & SD_STATUS_LINK_CHG) {
3354                         sblk->status = SD_STATUS_UPDATED |
3355                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3356                         spin_lock(&tp->lock);
3357                         tg3_setup_phy(tp, 0);
3358                         spin_unlock(&tp->lock);
3359                 }
3360         }
3361
3362         /* run TX completion thread */
3363         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3364                 tg3_tx(tp);
3365                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3366                         netif_rx_complete(netdev);
3367                         schedule_work(&tp->reset_task);
3368                         return 0;
3369                 }
3370         }
3371
3372         /* run RX thread, within the bounds set by NAPI.
3373          * All RX "locking" is done by ensuring outside
3374          * code synchronizes with dev->poll()
3375          */
3376         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3377                 int orig_budget = *budget;
3378                 int work_done;
3379
3380                 if (orig_budget > netdev->quota)
3381                         orig_budget = netdev->quota;
3382
3383                 work_done = tg3_rx(tp, orig_budget);
3384
3385                 *budget -= work_done;
3386                 netdev->quota -= work_done;
3387         }
3388
3389         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3390                 tp->last_tag = sblk->status_tag;
3391                 rmb();
3392         } else
3393                 sblk->status &= ~SD_STATUS_UPDATED;
3394
3395         /* if no more work, tell net stack and NIC we're done */
3396         done = !tg3_has_work(tp);
3397         if (done) {
3398                 netif_rx_complete(netdev);
3399                 tg3_restart_ints(tp);
3400         }
3401
3402         return (done ? 0 : 1);
3403 }
3404
3405 static void tg3_irq_quiesce(struct tg3 *tp)
3406 {
3407         BUG_ON(tp->irq_sync);
3408
3409         tp->irq_sync = 1;
3410         smp_mb();
3411
3412         synchronize_irq(tp->pdev->irq);
3413 }
3414
3415 static inline int tg3_irq_sync(struct tg3 *tp)
3416 {
3417         return tp->irq_sync;
3418 }
3419
3420 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3421  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3422  * with as well.  Most of the time, this is not necessary except when
3423  * shutting down the device.
3424  */
3425 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3426 {
3427         if (irq_sync)
3428                 tg3_irq_quiesce(tp);
3429         spin_lock_bh(&tp->lock);
3430 }
3431
3432 static inline void tg3_full_unlock(struct tg3 *tp)
3433 {
3434         spin_unlock_bh(&tp->lock);
3435 }
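/* Typical usage, as in tg3_reset_task() and tg3_change_mtu() below:
 *
 *      tg3_full_lock(tp, 1);
 *      ... halt, reprogram and restart the chip ...
 *      tg3_full_unlock(tp);
 *
 * irq_sync == 1 additionally waits for any in-flight interrupt handler to
 * finish; irq_sync == 0 just takes tp->lock (with BHs disabled).
 */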
3436
3437 /* One-shot MSI handler - Chip automatically disables interrupt
3438  * after sending MSI so driver doesn't have to do it.
3439  */
3440 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3441 {
3442         struct net_device *dev = dev_id;
3443         struct tg3 *tp = netdev_priv(dev);
3444
3445         prefetch(tp->hw_status);
3446         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3447
3448         if (likely(!tg3_irq_sync(tp)))
3449                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3450
3451         return IRQ_HANDLED;
3452 }
3453
3454 /* MSI ISR - No need to check for interrupt sharing and no need to
3455  * flush status block and interrupt mailbox. PCI ordering rules
3456  * guarantee that MSI will arrive after the status block.
3457  */
3458 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3459 {
3460         struct net_device *dev = dev_id;
3461         struct tg3 *tp = netdev_priv(dev);
3462
3463         prefetch(tp->hw_status);
3464         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3465         /*
3466          * Writing any value to intr-mbox-0 clears PCI INTA# and
3467          * chip-internal interrupt pending events.
3468          * Writing non-zero to intr-mbox-0 additionally tells the
3469          * NIC to stop sending us irqs, engaging "in-intr-handler"
3470          * event coalescing.
3471          */
3472         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3473         if (likely(!tg3_irq_sync(tp)))
3474                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3475
3476         return IRQ_RETVAL(1);
3477 }
3478
3479 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3480 {
3481         struct net_device *dev = dev_id;
3482         struct tg3 *tp = netdev_priv(dev);
3483         struct tg3_hw_status *sblk = tp->hw_status;
3484         unsigned int handled = 1;
3485
3486         /* In INTx mode, it is possible for the interrupt to arrive at
3487          * the CPU before the status block posted prior to it is visible.
3488          * Reading the PCI State register will confirm whether the
3489          * interrupt is ours and will flush the status block.
3490          */
3491         if ((sblk->status & SD_STATUS_UPDATED) ||
3492             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3493                 /*
3494                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3495                  * chip-internal interrupt pending events.
3496                  * Writing non-zero to intr-mbox-0 additionally tells the
3497                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3498                  * event coalescing.
3499                  */
3500                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3501                              0x00000001);
3502                 if (tg3_irq_sync(tp))
3503                         goto out;
3504                 sblk->status &= ~SD_STATUS_UPDATED;
3505                 if (likely(tg3_has_work(tp))) {
3506                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3507                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3508                 } else {
3509                         /* No work, shared interrupt perhaps?  re-enable
3510                          * interrupts, and flush that PCI write
3511                          */
3512                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3513                                 0x00000000);
3514                 }
3515         } else {        /* shared interrupt */
3516                 handled = 0;
3517         }
3518 out:
3519         return IRQ_RETVAL(handled);
3520 }
3521
3522 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3523 {
3524         struct net_device *dev = dev_id;
3525         struct tg3 *tp = netdev_priv(dev);
3526         struct tg3_hw_status *sblk = tp->hw_status;
3527         unsigned int handled = 1;
3528
3529         /* In INTx mode, it is possible for the interrupt to arrive at
3530          * the CPU before the status block posted prior to it is visible.
3531          * Reading the PCI State register will confirm whether the
3532          * interrupt is ours and will flush the status block.
3533          */
3534         if ((sblk->status_tag != tp->last_tag) ||
3535             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3536                 /*
3537                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3538                  * chip-internal interrupt pending events.
3539                  * Writing non-zero to intr-mbox-0 additionally tells the
3540                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3541                  * event coalescing.
3542                  */
3543                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3544                              0x00000001);
3545                 if (tg3_irq_sync(tp))
3546                         goto out;
3547                 if (netif_rx_schedule_prep(dev)) {
3548                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3549                         /* Update last_tag to mark that this status has been
3550                          * seen. Because interrupt may be shared, we may be
3551                          * racing with tg3_poll(), so only update last_tag
3552                          * if tg3_poll() is not scheduled.
3553                          */
3554                         tp->last_tag = sblk->status_tag;
3555                         __netif_rx_schedule(dev);
3556                 }
3557         } else {        /* shared interrupt */
3558                 handled = 0;
3559         }
3560 out:
3561         return IRQ_RETVAL(handled);
3562 }
3563
3564 /* ISR for interrupt test */
3565 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3566                 struct pt_regs *regs)
3567 {
3568         struct net_device *dev = dev_id;
3569         struct tg3 *tp = netdev_priv(dev);
3570         struct tg3_hw_status *sblk = tp->hw_status;
3571
3572         if ((sblk->status & SD_STATUS_UPDATED) ||
3573             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3574                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3575                              0x00000001);
3576                 return IRQ_RETVAL(1);
3577         }
3578         return IRQ_RETVAL(0);
3579 }
3580
3581 static int tg3_init_hw(struct tg3 *, int);
3582 static int tg3_halt(struct tg3 *, int, int);
3583
3584 #ifdef CONFIG_NET_POLL_CONTROLLER
3585 static void tg3_poll_controller(struct net_device *dev)
3586 {
3587         struct tg3 *tp = netdev_priv(dev);
3588
3589         tg3_interrupt(tp->pdev->irq, dev, NULL);
3590 }
3591 #endif
3592
3593 static void tg3_reset_task(void *_data)
3594 {
3595         struct tg3 *tp = _data;
3596         unsigned int restart_timer;
3597
3598         tg3_full_lock(tp, 0);
3599         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3600
3601         if (!netif_running(tp->dev)) {
3602                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3603                 tg3_full_unlock(tp);
3604                 return;
3605         }
3606
3607         tg3_full_unlock(tp);
3608
3609         tg3_netif_stop(tp);
3610
3611         tg3_full_lock(tp, 1);
3612
3613         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3614         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3615
3616         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3617                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3618                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3619                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3620                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3621         }
3622
3623         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3624         tg3_init_hw(tp, 1);
3625
3626         tg3_netif_start(tp);
3627
3628         if (restart_timer)
3629                 mod_timer(&tp->timer, jiffies + 1);
3630
3631         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3632
3633         tg3_full_unlock(tp);
3634 }
3635
3636 static void tg3_tx_timeout(struct net_device *dev)
3637 {
3638         struct tg3 *tp = netdev_priv(dev);
3639
3640         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3641                dev->name);
3642
3643         schedule_work(&tp->reset_task);
3644 }
3645
3646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3648 {
3649         u32 base = (u32) mapping & 0xffffffff;
3650
3651         return ((base > 0xffffdcc0) &&
3652                 (base + len + 8 < base));
3653 }
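/* Worked example: mapping = 0x1ffffff00, len = 0x400.  base = 0xffffff00
 * exceeds 0xffffdcc0, and base + len + 8 wraps to 0x00000308 < base, so the
 * buffer straddles a 4GB boundary and the hwbug workaround is taken.  The
 * 0xffffdcc0 cutoff leaves 0x2340 (9024) bytes below the boundary,
 * presumably enough to cover the largest (jumbo) frame, so buffers that
 * start any lower are filtered out cheaply.
 */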
3654
3655 /* Test for DMA addresses > 40-bit */
3656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3657                                           int len)
3658 {
3659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3660         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3661                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3662         return 0;
3663 #else
3664         return 0;
3665 #endif
3666 }
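/* As the #if above shows, this test is only compiled in on 64-bit kernels
 * with CONFIG_HIGHMEM; every other configuration returns constant 0, so
 * TG3_FLAG_40BIT_DMA_BUG alone never forces the workaround path there.
 */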
3667
3668 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3669
3670 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3671 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3672                                        u32 last_plus_one, u32 *start,
3673                                        u32 base_flags, u32 mss)
3674 {
3675         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3676         dma_addr_t new_addr = 0;
3677         u32 entry = *start;
3678         int i, ret = 0;
3679
3680         if (!new_skb) {
3681                 ret = -1;
3682         } else {
3683                 /* New SKB is guaranteed to be linear. */
3684                 entry = *start;
3685                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3686                                           PCI_DMA_TODEVICE);
3687                 /* Make sure new skb does not cross any 4G boundaries.
3688                  * Drop the packet if it does.
3689                  */
3690                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3691                         ret = -1;
3692                         dev_kfree_skb(new_skb);
3693                         new_skb = NULL;
3694                 } else {
3695                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3696                                     base_flags, 1 | (mss << 1));
3697                         *start = NEXT_TX(entry);
3698                 }
3699         }
3700
3701         /* Now clean up the sw ring entries. */
3702         i = 0;
3703         while (entry != last_plus_one) {
3704                 int len;
3705
3706                 if (i == 0)
3707                         len = skb_headlen(skb);
3708                 else
3709                         len = skb_shinfo(skb)->frags[i-1].size;
3710                 pci_unmap_single(tp->pdev,
3711                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3712                                  len, PCI_DMA_TODEVICE);
3713                 if (i == 0) {
3714                         tp->tx_buffers[entry].skb = new_skb;
3715                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3716                 } else {
3717                         tp->tx_buffers[entry].skb = NULL;
3718                 }
3719                 entry = NEXT_TX(entry);
3720                 i++;
3721         }
3722
3723         dev_kfree_skb(skb);
3724
3725         return ret;
3726 }
3727
3728 static void tg3_set_txd(struct tg3 *tp, int entry,
3729                         dma_addr_t mapping, int len, u32 flags,
3730                         u32 mss_and_is_end)
3731 {
3732         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3733         int is_end = (mss_and_is_end & 0x1);
3734         u32 mss = (mss_and_is_end >> 1);
3735         u32 vlan_tag = 0;
3736
3737         if (is_end)
3738                 flags |= TXD_FLAG_END;
3739         if (flags & TXD_FLAG_VLAN) {
3740                 vlan_tag = flags >> 16;
3741                 flags &= 0xffff;
3742         }
3743         vlan_tag |= (mss << TXD_MSS_SHIFT);
3744
3745         txd->addr_hi = ((u64) mapping >> 32);
3746         txd->addr_lo = ((u64) mapping & 0xffffffff);
3747         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3748         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3749 }
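/* mss_and_is_end packs two values: bit 0 flags the final descriptor of a
 * packet (translated into TXD_FLAG_END) and bits 31:1 carry the MSS, which
 * is merged into the same descriptor word as the VLAN tag.  A VLAN tag, if
 * any, arrives in the upper 16 bits of the flags argument.
 */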
3750
3751 /* hard_start_xmit for devices that don't have any bugs and
3752  * support TG3_FLG2_HW_TSO_2 only.
3753  */
3754 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3755 {
3756         struct tg3 *tp = netdev_priv(dev);
3757         dma_addr_t mapping;
3758         u32 len, entry, base_flags, mss;
3759
3760         len = skb_headlen(skb);
3761
3762         /* No BH disabling for tx_lock here.  We are running in BH disabled
3763          * context and TX reclaim runs via tp->poll inside of a software
3764          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3765          * no IRQ context deadlocks to worry about either.  Rejoice!
3766          */
3767         if (!spin_trylock(&tp->tx_lock))
3768                 return NETDEV_TX_LOCKED;
3769
3770         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3771                 if (!netif_queue_stopped(dev)) {
3772                         netif_stop_queue(dev);
3773
3774                         /* This is a hard error, log it. */
3775                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3776                                "queue awake!\n", dev->name);
3777                 }
3778                 spin_unlock(&tp->tx_lock);
3779                 return NETDEV_TX_BUSY;
3780         }
3781
3782         entry = tp->tx_prod;
3783         base_flags = 0;
3784 #if TG3_TSO_SUPPORT != 0
3785         mss = 0;
3786         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3787             (mss = skb_shinfo(skb)->tso_size) != 0) {
3788                 int tcp_opt_len, ip_tcp_len;
3789
3790                 if (skb_header_cloned(skb) &&
3791                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3792                         dev_kfree_skb(skb);
3793                         goto out_unlock;
3794                 }
3795
3796                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3797                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3798
3799                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3800                                TXD_FLAG_CPU_POST_DMA);
3801
3802                 skb->nh.iph->check = 0;
3803                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3804
3805                 skb->h.th->check = 0;
3806
3807                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3808         } else if (skb->ip_summed == CHECKSUM_HW)
3810                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3811 #else
3812         mss = 0;
3813         if (skb->ip_summed == CHECKSUM_HW)
3814                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3815 #endif
3816 #if TG3_VLAN_TAG_USED
3817         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3818                 base_flags |= (TXD_FLAG_VLAN |
3819                                (vlan_tx_tag_get(skb) << 16));
3820 #endif
3821
3822         /* Queue skb data, a.k.a. the main skb fragment. */
3823         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3824
3825         tp->tx_buffers[entry].skb = skb;
3826         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3827
3828         tg3_set_txd(tp, entry, mapping, len, base_flags,
3829                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3830
3831         entry = NEXT_TX(entry);
3832
3833         /* Now loop through additional data fragments, and queue them. */
3834         if (skb_shinfo(skb)->nr_frags > 0) {
3835                 unsigned int i, last;
3836
3837                 last = skb_shinfo(skb)->nr_frags - 1;
3838                 for (i = 0; i <= last; i++) {
3839                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3840
3841                         len = frag->size;
3842                         mapping = pci_map_page(tp->pdev,
3843                                                frag->page,
3844                                                frag->page_offset,
3845                                                len, PCI_DMA_TODEVICE);
3846
3847                         tp->tx_buffers[entry].skb = NULL;
3848                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3849
3850                         tg3_set_txd(tp, entry, mapping, len,
3851                                     base_flags, (i == last) | (mss << 1));
3852
3853                         entry = NEXT_TX(entry);
3854                 }
3855         }
3856
3857         /* Packets are ready, update Tx producer idx local and on card. */
3858         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3859
3860         tp->tx_prod = entry;
3861         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3862                 netif_stop_queue(dev);
3863                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3864                         netif_wake_queue(tp->dev);
3865         }
3866
3867 out_unlock:
3868         mmiowb();
3869         spin_unlock(&tp->tx_lock);
3870
3871         dev->trans_start = jiffies;
3872
3873         return NETDEV_TX_OK;
3874 }
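/* The queue is stopped once no more than MAX_SKB_FRAGS + 1 descriptors
 * remain because the next packet may need one descriptor for its linear
 * head plus one per page fragment; tg3_tx() wakes the queue again once the
 * ring drains past TG3_TX_WAKEUP_THRESH.
 */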
3875
3876 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3877  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3878  */
3879 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3880 {
3881         struct tg3 *tp = netdev_priv(dev);
3882         dma_addr_t mapping;
3883         u32 len, entry, base_flags, mss;
3884         int would_hit_hwbug;
3885
3886         len = skb_headlen(skb);
3887
3888         /* No BH disabling for tx_lock here.  We are running in BH disabled
3889          * context and TX reclaim runs via tp->poll inside of a software
3890          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3891          * no IRQ context deadlocks to worry about either.  Rejoice!
3892          */
3893         if (!spin_trylock(&tp->tx_lock))
3894                 return NETDEV_TX_LOCKED; 
3895
3896         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3897                 if (!netif_queue_stopped(dev)) {
3898                         netif_stop_queue(dev);
3899
3900                         /* This is a hard error, log it. */
3901                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3902                                "queue awake!\n", dev->name);
3903                 }
3904                 spin_unlock(&tp->tx_lock);
3905                 return NETDEV_TX_BUSY;
3906         }
3907
3908         entry = tp->tx_prod;
3909         base_flags = 0;
3910         if (skb->ip_summed == CHECKSUM_HW)
3911                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3912 #if TG3_TSO_SUPPORT != 0
3913         mss = 0;
3914         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3915             (mss = skb_shinfo(skb)->tso_size) != 0) {
3916                 int tcp_opt_len, ip_tcp_len;
3917
3918                 if (skb_header_cloned(skb) &&
3919                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3920                         dev_kfree_skb(skb);
3921                         goto out_unlock;
3922                 }
3923
3924                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3925                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3926
3927                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3928                                TXD_FLAG_CPU_POST_DMA);
3929
3930                 skb->nh.iph->check = 0;
3931                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3932                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3933                         skb->h.th->check = 0;
3934                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3935                 } else {
3937                         skb->h.th->check =
3938                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3939                                                    skb->nh.iph->daddr,
3940                                                    0, IPPROTO_TCP, 0);
3941                 }
3942
3943                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3944                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3945                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3946                                 int tsflags;
3947
3948                                 tsflags = ((skb->nh.iph->ihl - 5) +
3949                                            (tcp_opt_len >> 2));
3950                                 mss |= (tsflags << 11);
3951                         }
3952                 } else {
3953                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3954                                 int tsflags;
3955
3956                                 tsflags = ((skb->nh.iph->ihl - 5) +
3957                                            (tcp_opt_len >> 2));
3958                                 base_flags |= tsflags << 12;
3959                         }
3960                 }
3961         }
3962 #else
3963         mss = 0;
3964 #endif
3965 #if TG3_VLAN_TAG_USED
3966         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3967                 base_flags |= (TXD_FLAG_VLAN |
3968                                (vlan_tx_tag_get(skb) << 16));
3969 #endif
3970
3971         /* Queue skb data, a.k.a. the main skb fragment. */
3972         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3973
3974         tp->tx_buffers[entry].skb = skb;
3975         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3976
3977         would_hit_hwbug = 0;
3978
3979         if (tg3_4g_overflow_test(mapping, len))
3980                 would_hit_hwbug = 1;
3981
3982         tg3_set_txd(tp, entry, mapping, len, base_flags,
3983                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3984
3985         entry = NEXT_TX(entry);
3986
3987         /* Now loop through additional data fragments, and queue them. */
3988         if (skb_shinfo(skb)->nr_frags > 0) {
3989                 unsigned int i, last;
3990
3991                 last = skb_shinfo(skb)->nr_frags - 1;
3992                 for (i = 0; i <= last; i++) {
3993                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3994
3995                         len = frag->size;
3996                         mapping = pci_map_page(tp->pdev,
3997                                                frag->page,
3998                                                frag->page_offset,
3999                                                len, PCI_DMA_TODEVICE);
4000
4001                         tp->tx_buffers[entry].skb = NULL;
4002                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4003
4004                         if (tg3_4g_overflow_test(mapping, len))
4005                                 would_hit_hwbug = 1;
4006
4007                         if (tg3_40bit_overflow_test(tp, mapping, len))
4008                                 would_hit_hwbug = 1;
4009
4010                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4011                                 tg3_set_txd(tp, entry, mapping, len,
4012                                             base_flags, (i == last)|(mss << 1));
4013                         else
4014                                 tg3_set_txd(tp, entry, mapping, len,
4015                                             base_flags, (i == last));
4016
4017                         entry = NEXT_TX(entry);
4018                 }
4019         }
4020
4021         if (would_hit_hwbug) {
4022                 u32 last_plus_one = entry;
4023                 u32 start;
4024
4025                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4026                 start &= (TG3_TX_RING_SIZE - 1);
4027
4028                 /* If the workaround fails due to memory/mapping
4029                  * failure, silently drop this packet.
4030                  */
4031                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4032                                                 &start, base_flags, mss))
4033                         goto out_unlock;
4034
4035                 entry = start;
4036         }
4037
4038         /* Packets are ready, update Tx producer idx local and on card. */
4039         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4040
4041         tp->tx_prod = entry;
4042         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
4043                 netif_stop_queue(dev);
4044                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4045                         netif_wake_queue(tp->dev);
4046         }
4047
4048 out_unlock:
4049         mmiowb();
4050         spin_unlock(&tp->tx_lock);
4051
4052         dev->trans_start = jiffies;
4053
4054         return NETDEV_TX_OK;
4055 }
4056
4057 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4058                                int new_mtu)
4059 {
4060         dev->mtu = new_mtu;
4061
4062         if (new_mtu > ETH_DATA_LEN) {
4063                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4064                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4065                         ethtool_op_set_tso(dev, 0);
4066                 } else
4068                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4069         } else {
4070                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4071                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4072                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4073         }
4074 }
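/* On 5780-class chips jumbo frames and TSO are mutually exclusive: raising
 * the MTU past ETH_DATA_LEN clears TG3_FLG2_TSO_CAPABLE and turns TSO off
 * via ethtool_op_set_tso(), while dropping back to a standard MTU only
 * restores the capability flag.
 */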
4075
4076 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4077 {
4078         struct tg3 *tp = netdev_priv(dev);
4079
4080         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4081                 return -EINVAL;
4082
4083         if (!netif_running(dev)) {
4084                 /* We'll just catch it later when the
4085                  * device is up'd.
4086                  */
4087                 tg3_set_mtu(dev, tp, new_mtu);
4088                 return 0;
4089         }
4090
4091         tg3_netif_stop(tp);
4092
4093         tg3_full_lock(tp, 1);
4094
4095         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4096
4097         tg3_set_mtu(dev, tp, new_mtu);
4098
4099         tg3_init_hw(tp, 0);
4100
4101         tg3_netif_start(tp);
4102
4103         tg3_full_unlock(tp);
4104
4105         return 0;
4106 }
4107
4108 /* Free up pending packets in all rx/tx rings.
4109  *
4110  * The chip has been shut down and the driver detached from
4111  * the networking stack, so no interrupts or new tx packets will
4112  * end up in the driver.  tp->{tx,}lock is not held and we are not
4113  * in an interrupt context and thus may sleep.
4114  */
4115 static void tg3_free_rings(struct tg3 *tp)
4116 {
4117         struct ring_info *rxp;
4118         int i;
4119
4120         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4121                 rxp = &tp->rx_std_buffers[i];
4122
4123                 if (rxp->skb == NULL)
4124                         continue;
4125                 pci_unmap_single(tp->pdev,
4126                                  pci_unmap_addr(rxp, mapping),
4127                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4128                                  PCI_DMA_FROMDEVICE);
4129                 dev_kfree_skb_any(rxp->skb);
4130                 rxp->skb = NULL;
4131         }
4132
4133         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4134                 rxp = &tp->rx_jumbo_buffers[i];
4135
4136                 if (rxp->skb == NULL)
4137                         continue;
4138                 pci_unmap_single(tp->pdev,
4139                                  pci_unmap_addr(rxp, mapping),
4140                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4141                                  PCI_DMA_FROMDEVICE);
4142                 dev_kfree_skb_any(rxp->skb);
4143                 rxp->skb = NULL;
4144         }
4145
4146         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4147                 struct tx_ring_info *txp;
4148                 struct sk_buff *skb;
4149                 int j;
4150
4151                 txp = &tp->tx_buffers[i];
4152                 skb = txp->skb;
4153
4154                 if (skb == NULL) {
4155                         i++;
4156                         continue;
4157                 }
4158
4159                 pci_unmap_single(tp->pdev,
4160                                  pci_unmap_addr(txp, mapping),
4161                                  skb_headlen(skb),
4162                                  PCI_DMA_TODEVICE);
4163                 txp->skb = NULL;
4164
4165                 i++;
4166
4167                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4168                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4169                         pci_unmap_page(tp->pdev,
4170                                        pci_unmap_addr(txp, mapping),
4171                                        skb_shinfo(skb)->frags[j].size,
4172                                        PCI_DMA_TODEVICE);
4173                         i++;
4174                 }
4175
4176                 dev_kfree_skb_any(skb);
4177         }
4178 }
4179
4180 /* Initialize tx/rx rings for packet processing.
4181  *
4182  * The chip has been shut down and the driver detached from
4183  * the networking stack, so no interrupts or new tx packets will
4184  * end up in the driver.  tp->{tx,}lock are held and thus
4185  * we may not sleep.
4186  */
4187 static void tg3_init_rings(struct tg3 *tp)
4188 {
4189         u32 i;
4190
4191         /* Free up all the SKBs. */
4192         tg3_free_rings(tp);
4193
4194         /* Zero out all descriptors. */
4195         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4196         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4197         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4198         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4199
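        /* 5780-class chips do not use a separate jumbo ring (see
         * tg3_set_mtu() above), so with a jumbo MTU the standard ring
         * itself is populated with jumbo-sized buffers.
         */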
4200         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4201         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4202             (tp->dev->mtu > ETH_DATA_LEN))
4203                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4204
4205         /* Initialize invariants of the rings; we only set this
4206          * stuff once.  This works because the card does not
4207          * write into the rx buffer posting rings.
4208          */
4209         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4210                 struct tg3_rx_buffer_desc *rxd;
4211
4212                 rxd = &tp->rx_std[i];
4213                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4214                         << RXD_LEN_SHIFT;
4215                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4216                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4217                                (i << RXD_OPAQUE_INDEX_SHIFT));
4218         }
4219
4220         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4221                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4222                         struct tg3_rx_buffer_desc *rxd;
4223
4224                         rxd = &tp->rx_jumbo[i];
4225                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4226                                 << RXD_LEN_SHIFT;
4227                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4228                                 RXD_FLAG_JUMBO;
4229                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4230                                (i << RXD_OPAQUE_INDEX_SHIFT));
4231                 }
4232         }
4233
4234         /* Now allocate fresh SKBs for each rx ring. */
4235         for (i = 0; i < tp->rx_pending; i++) {
4236                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4237                                      -1, i) < 0)
4238                         break;
4239         }
4240
4241         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4242                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4243                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4244                                              -1, i) < 0)
4245                                 break;
4246                 }
4247         }
4248 }
4249
4250 /*
4251  * Must not be invoked with interrupt sources disabled and
4252  * the hardware shut down.
4253  */
4254 static void tg3_free_consistent(struct tg3 *tp)
4255 {
4256         kfree(tp->rx_std_buffers);
4257         tp->rx_std_buffers = NULL;
4258         if (tp->rx_std) {
4259                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4260                                     tp->rx_std, tp->rx_std_mapping);
4261                 tp->rx_std = NULL;
4262         }
4263         if (tp->rx_jumbo) {
4264                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4265                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4266                 tp->rx_jumbo = NULL;
4267         }
4268         if (tp->rx_rcb) {
4269                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4270                                     tp->rx_rcb, tp->rx_rcb_mapping);
4271                 tp->rx_rcb = NULL;
4272         }
4273         if (tp->tx_ring) {
4274                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4275                         tp->tx_ring, tp->tx_desc_mapping);
4276                 tp->tx_ring = NULL;
4277         }
4278         if (tp->hw_status) {
4279                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4280                                     tp->hw_status, tp->status_mapping);
4281                 tp->hw_status = NULL;
4282         }
4283         if (tp->hw_stats) {
4284                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4285                                     tp->hw_stats, tp->stats_mapping);
4286                 tp->hw_stats = NULL;
4287         }
4288 }
4289
4290 /*
4291  * Must not be invoked with interrupt sources disabled and
4292  * the hardware shut down.  Can sleep.
4293  */
4294 static int tg3_alloc_consistent(struct tg3 *tp)
4295 {
4296         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4297                                       (TG3_RX_RING_SIZE +
4298                                        TG3_RX_JUMBO_RING_SIZE)) +
4299                                      (sizeof(struct tx_ring_info) *
4300                                       TG3_TX_RING_SIZE),
4301                                      GFP_KERNEL);
4302         if (!tp->rx_std_buffers)
4303                 return -ENOMEM;
4304
4305         memset(tp->rx_std_buffers, 0,
4306                (sizeof(struct ring_info) *
4307                 (TG3_RX_RING_SIZE +
4308                  TG3_RX_JUMBO_RING_SIZE)) +
4309                (sizeof(struct tx_ring_info) *
4310                 TG3_TX_RING_SIZE));
4311
4312         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4313         tp->tx_buffers = (struct tx_ring_info *)
4314                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
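
        /* The single kmalloc above is carved into three contiguous
         * shadow arrays:
         *
         *   rx_std_buffers   : TG3_RX_RING_SIZE       x struct ring_info
         *   rx_jumbo_buffers : TG3_RX_JUMBO_RING_SIZE x struct ring_info
         *   tx_buffers       : TG3_TX_RING_SIZE       x struct tx_ring_info
         */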
4315
4316         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4317                                           &tp->rx_std_mapping);
4318         if (!tp->rx_std)
4319                 goto err_out;
4320
4321         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4322                                             &tp->rx_jumbo_mapping);
4323
4324         if (!tp->rx_jumbo)
4325                 goto err_out;
4326
4327         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4328                                           &tp->rx_rcb_mapping);
4329         if (!tp->rx_rcb)
4330                 goto err_out;
4331
4332         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4333                                            &tp->tx_desc_mapping);
4334         if (!tp->tx_ring)
4335                 goto err_out;
4336
4337         tp->hw_status = pci_alloc_consistent(tp->pdev,
4338                                              TG3_HW_STATUS_SIZE,
4339                                              &tp->status_mapping);
4340         if (!tp->hw_status)
4341                 goto err_out;
4342
4343         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4344                                             sizeof(struct tg3_hw_stats),
4345                                             &tp->stats_mapping);
4346         if (!tp->hw_stats)
4347                 goto err_out;
4348
4349         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4350         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4351
4352         return 0;
4353
4354 err_out:
4355         tg3_free_consistent(tp);
4356         return -ENOMEM;
4357 }
4358
4359 #define MAX_WAIT_CNT 1000
4360
4361 /* To stop a block, clear the enable bit and poll till it
4362  * clears.  tp->lock is held.
4363  */
4364 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4365 {
4366         unsigned int i;
4367         u32 val;
4368
4369         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4370                 switch (ofs) {
4371                 case RCVLSC_MODE:
4372                 case DMAC_MODE:
4373                 case MBFREE_MODE:
4374                 case BUFMGR_MODE:
4375                 case MEMARB_MODE:
4376                         /* We can't enable/disable these bits of the
4377                          * 5705/5750, just say success.
4378                          */
4379                         return 0;
4380
4381                 default:
4382                         break;
4383                 }
4384         }
4385
4386         val = tr32(ofs);
4387         val &= ~enable_bit;
4388         tw32_f(ofs, val);
4389
4390         for (i = 0; i < MAX_WAIT_CNT; i++) {
4391                 udelay(100);
4392                 val = tr32(ofs);
4393                 if ((val & enable_bit) == 0)
4394                         break;
4395         }
4396
4397         if (i == MAX_WAIT_CNT && !silent) {
4398                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4399                        "ofs=%lx enable_bit=%x\n",
4400                        ofs, enable_bit);
4401                 return -ENODEV;
4402         }
4403
4404         return 0;
4405 }
4406
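/* Quiesce the chip roughly front to back: interrupts and the receive
 * MAC first, then the RX engine blocks, the send and DMA blocks, the
 * MAC transmitter, host coalescing and the write DMA engine, and
 * finally (after an FTQ reset) the buffer manager and memory arbiter.
 * The status block and statistics are cleared at the end so no stale
 * state survives the shutdown.
 */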
4407 /* tp->lock is held. */
4408 static int tg3_abort_hw(struct tg3 *tp, int silent)
4409 {
4410         int i, err;
4411
4412         tg3_disable_ints(tp);
4413
4414         tp->rx_mode &= ~RX_MODE_ENABLE;
4415         tw32_f(MAC_RX_MODE, tp->rx_mode);
4416         udelay(10);
4417
4418         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4419         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4420         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4421         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4422         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4423         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4424
4425         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4426         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4427         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4428         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4429         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4430         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4431         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4432
4433         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4434         tw32_f(MAC_MODE, tp->mac_mode);
4435         udelay(40);
4436
4437         tp->tx_mode &= ~TX_MODE_ENABLE;
4438         tw32_f(MAC_TX_MODE, tp->tx_mode);
4439
4440         for (i = 0; i < MAX_WAIT_CNT; i++) {
4441                 udelay(100);
4442                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4443                         break;
4444         }
4445         if (i >= MAX_WAIT_CNT) {
4446                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4447                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4448                        tp->dev->name, tr32(MAC_TX_MODE));
4449                 err |= -ENODEV;
4450         }
4451
4452         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4453         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4454         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4455
4456         tw32(FTQ_RESET, 0xffffffff);
4457         tw32(FTQ_RESET, 0x00000000);
4458
4459         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4460         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4461
4462         if (tp->hw_status)
4463                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4464         if (tp->hw_stats)
4465                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4466
4467         return err;
4468 }
4469
4470 /* tp->lock is held. */
4471 static int tg3_nvram_lock(struct tg3 *tp)
4472 {
4473         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4474                 int i;
4475
4476                 if (tp->nvram_lock_cnt == 0) {
4477                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4478                         for (i = 0; i < 8000; i++) {
4479                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4480                                         break;
4481                                 udelay(20);
4482                         }
4483                         if (i == 8000) {
4484                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4485                                 return -ENODEV;
4486                         }
4487                 }
4488                 tp->nvram_lock_cnt++;
4489         }
4490         return 0;
4491 }
4492
4493 /* tp->lock is held. */
4494 static void tg3_nvram_unlock(struct tg3 *tp)
4495 {
4496         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4497                 if (tp->nvram_lock_cnt > 0)
4498                         tp->nvram_lock_cnt--;
4499                 if (tp->nvram_lock_cnt == 0)
4500                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4501         }
4502 }
4503
4504 /* tp->lock is held. */
4505 static void tg3_enable_nvram_access(struct tg3 *tp)
4506 {
4507         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4508             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4509                 u32 nvaccess = tr32(NVRAM_ACCESS);
4510
4511                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4512         }
4513 }
4514
4515 /* tp->lock is held. */
4516 static void tg3_disable_nvram_access(struct tg3 *tp)
4517 {
4518         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4519             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4520                 u32 nvaccess = tr32(NVRAM_ACCESS);
4521
4522                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4523         }
4524 }
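
/* Typical calling pattern for the NVRAM helpers above (illustrative
 * sketch only; the real read/write paths appear later in this file):
 *
 *      if (tg3_nvram_lock(tp) == 0) {
 *              tg3_enable_nvram_access(tp);
 *              ... access the NVRAM data/command registers ...
 *              tg3_disable_nvram_access(tp);
 *              tg3_nvram_unlock(tp);
 *      }
 *
 * tg3_nvram_lock() acquires the hardware software-arbitration
 * semaphore (NVRAM_SWARB) and keeps a recursion count in
 * nvram_lock_cnt, so nested lock/unlock pairs are safe as long as
 * they stay balanced.
 */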
4525
4526 /* tp->lock is held. */
4527 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4528 {
4529         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4530                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4531
4532         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4533                 switch (kind) {
4534                 case RESET_KIND_INIT:
4535                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4536                                       DRV_STATE_START);
4537                         break;
4538
4539                 case RESET_KIND_SHUTDOWN:
4540                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4541                                       DRV_STATE_UNLOAD);
4542                         break;
4543
4544                 case RESET_KIND_SUSPEND:
4545                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4546                                       DRV_STATE_SUSPEND);
4547                         break;
4548
4549                 default:
4550                         break;
4551                 }
4552         }
4553 }
4554
4555 /* tp->lock is held. */
4556 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4557 {
4558         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4559                 switch (kind) {
4560                 case RESET_KIND_INIT:
4561                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4562                                       DRV_STATE_START_DONE);
4563                         break;
4564
4565                 case RESET_KIND_SHUTDOWN:
4566                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4567                                       DRV_STATE_UNLOAD_DONE);
4568                         break;
4569
4570                 default:
4571                         break;
4572                 }
4573         }
4574 }
4575
4576 /* tp->lock is held. */
4577 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4578 {
4579         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4580                 switch (kind) {
4581                 case RESET_KIND_INIT:
4582                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4583                                       DRV_STATE_START);
4584                         break;
4585
4586                 case RESET_KIND_SHUTDOWN:
4587                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4588                                       DRV_STATE_UNLOAD);
4589                         break;
4590
4591                 case RESET_KIND_SUSPEND:
4592                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4593                                       DRV_STATE_SUSPEND);
4594                         break;
4595
4596                 default:
4597                         break;
4598                 }
4599         }
4600 }
4601
4602 static void tg3_stop_fw(struct tg3 *);
4603
4604 /* tp->lock is held. */
4605 static int tg3_chip_reset(struct tg3 *tp)
4606 {
4607         u32 val;
4608         void (*write_op)(struct tg3 *, u32, u32);
4609         int i;
4610
4611         tg3_nvram_lock(tp);
4612
4613         /* No matching tg3_nvram_unlock() after this because
4614          * chip reset below will undo the nvram lock.
4615          */
4616         tp->nvram_lock_cnt = 0;
4617
4618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4619             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4620             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4621                 tw32(GRC_FASTBOOT_PC, 0);
4622
4623         /*
4624          * We must avoid the readl() that normally takes place.
4625          * It locks machines, causes machine checks, and other
4626          * fun things.  So, temporarily disable the 5701
4627          * hardware workaround, while we do the reset.
4628          */
4629         write_op = tp->write32;
4630         if (write_op == tg3_write_flush_reg32)
4631                 tp->write32 = tg3_write32;
4632
4633         /* do the reset */
4634         val = GRC_MISC_CFG_CORECLK_RESET;
4635
4636         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4637                 if (tr32(0x7e2c) == 0x60) {
4638                         tw32(0x7e2c, 0x20);
4639                 }
4640                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4641                         tw32(GRC_MISC_CFG, (1 << 29));
4642                         val |= (1 << 29);
4643                 }
4644         }
4645
4646         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4647                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4648         tw32(GRC_MISC_CFG, val);
4649
4650         /* restore 5701 hardware bug workaround write method */
4651         tp->write32 = write_op;
4652
4653         /* Unfortunately, we have to delay before the PCI read back.
4654          * Some 575X chips will not even respond to a PCI cfg access
4655          * when the reset command is given to the chip.
4656          *
4657          * How do these hardware designers expect things to work
4658          * properly if the PCI write is posted for a long period
4659          * of time?  It is always necessary to have some method by
4660          * which a register read back can occur to push the write
4661          * out which does the reset.
4662          *
4663          * For most tg3 variants the trick below has worked.
4664          * Ho hum...
4665          */
4666         udelay(120);
4667
4668         /* Flush PCI posted writes.  The normal MMIO registers
4669          * are inaccessible at this time so this is the only
4670          * way to do this reliably (actually, this is no longer
4671          * the case, see above).  I tried to use indirect
4672          * register read/write but this upset some 5701 variants.
4673          */
4674         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4675
4676         udelay(120);
4677
4678         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4679                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4680                         int i;
4681                         u32 cfg_val;
4682
4683                         /* Wait for link training to complete.  */
4684                         for (i = 0; i < 5000; i++)
4685                                 udelay(100);
4686
4687                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4688                         pci_write_config_dword(tp->pdev, 0xc4,
4689                                                cfg_val | (1 << 15));
4690                 }
4691                 /* Set PCIE max payload size and clear error status.  */
4692                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4693         }
4694
4695         /* Re-enable indirect register accesses. */
4696         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4697                                tp->misc_host_ctrl);
4698
4699         /* Set MAX PCI retry to zero. */
4700         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4701         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4702             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4703                 val |= PCISTATE_RETRY_SAME_DMA;
4704         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4705
4706         pci_restore_state(tp->pdev);
4707
4708         /* Make sure PCI-X relaxed ordering bit is clear. */
4709         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4710         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4711         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4712
4713         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4714                 u32 val;
4715
4716                 /* Chip reset on 5780 will reset MSI enable bit,
4717                  * so we need to restore it.
4718                  */
4719                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4720                         u16 ctrl;
4721
4722                         pci_read_config_word(tp->pdev,
4723                                              tp->msi_cap + PCI_MSI_FLAGS,
4724                                              &ctrl);
4725                         pci_write_config_word(tp->pdev,
4726                                               tp->msi_cap + PCI_MSI_FLAGS,
4727                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4728                         val = tr32(MSGINT_MODE);
4729                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4730                 }
4731
4732                 val = tr32(MEMARB_MODE);
4733                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4734
4735         } else
4736                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4737
4738         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4739                 tg3_stop_fw(tp);
4740                 tw32(0x5000, 0x400);
4741         }
4742
4743         tw32(GRC_MODE, tp->grc_mode);
4744
4745         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4746                 u32 val = tr32(0xc4);
4747
4748                 tw32(0xc4, val | (1 << 15));
4749         }
4750
4751         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4752             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4753                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4754                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4755                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4756                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4757         }
4758
4759         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4760                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4761                 tw32_f(MAC_MODE, tp->mac_mode);
4762         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4763                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4764                 tw32_f(MAC_MODE, tp->mac_mode);
4765         } else
4766                 tw32_f(MAC_MODE, 0);
4767         udelay(40);
4768
4769         /* Wait for firmware initialization to complete. */
4770         for (i = 0; i < 100000; i++) {
4771                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4772                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4773                         break;
4774                 udelay(10);
4775         }
4776
4777         /* Chip might not be fitted with firmware.  Some Sun onboard
4778          * parts are configured like that.  So don't signal the timeout
4779          * of the above loop as an error, but do report the lack of
4780          * running firmware once.
4781          */
4782         if (i >= 100000 &&
4783             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4784                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4785
4786                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4787                        tp->dev->name);
4788         }
4789
4790         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4791             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4792                 u32 val = tr32(0x7c00);
4793
4794                 tw32(0x7c00, val | (1 << 25));
4795         }
4796
4797         /* Reprobe ASF enable state.  */
4798         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4799         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4800         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4801         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4802                 u32 nic_cfg;
4803
4804                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4805                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4806                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4807                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4808                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4809                 }
4810         }
4811
4812         return 0;
4813 }
4814
4815 /* tp->lock is held. */
4816 static void tg3_stop_fw(struct tg3 *tp)
4817 {
4818         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4819                 u32 val;
4820                 int i;
4821
4822                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4823                 val = tr32(GRC_RX_CPU_EVENT);
4824                 val |= (1 << 14);
4825                 tw32(GRC_RX_CPU_EVENT, val);
4826
4827                 /* Wait for RX cpu to ACK the event.  */
4828                 for (i = 0; i < 100; i++) {
4829                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4830                                 break;
4831                         udelay(1);
4832                 }
4833         }
4834 }
4835
4836 /* tp->lock is held. */
4837 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4838 {
4839         int err;
4840
4841         tg3_stop_fw(tp);
4842
4843         tg3_write_sig_pre_reset(tp, kind);
4844
4845         tg3_abort_hw(tp, silent);
4846         err = tg3_chip_reset(tp);
4847
4848         tg3_write_sig_legacy(tp, kind);
4849         tg3_write_sig_post_reset(tp, kind);
4850
4851         if (err)
4852                 return err;
4853
4854         return 0;
4855 }
4856
4857 #define TG3_FW_RELEASE_MAJOR    0x0
4858 #define TG3_FW_RELEASE_MINOR    0x0
4859 #define TG3_FW_RELEASE_FIX      0x0
4860 #define TG3_FW_START_ADDR       0x08000000
4861 #define TG3_FW_TEXT_ADDR        0x08000000
4862 #define TG3_FW_TEXT_LEN         0x9c0
4863 #define TG3_FW_RODATA_ADDR      0x080009c0
4864 #define TG3_FW_RODATA_LEN       0x60
4865 #define TG3_FW_DATA_ADDR        0x08000a40
4866 #define TG3_FW_DATA_LEN         0x20
4867 #define TG3_FW_SBSS_ADDR        0x08000a60
4868 #define TG3_FW_SBSS_LEN         0xc
4869 #define TG3_FW_BSS_ADDR         0x08000a70
4870 #define TG3_FW_BSS_LEN          0x10
4871
4872 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4873         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4874         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4875         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4876         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4877         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4878         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4879         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4880         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4881         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4882         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4883         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4884         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4885         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4886         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4887         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4888         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4889         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4890         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4891         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4892         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4893         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4894         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4895         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4896         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4897         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4898         0, 0, 0, 0, 0, 0,
4899         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4900         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4901         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4902         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4903         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4904         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4905         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4906         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4907         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4908         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4909         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4910         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4911         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4912         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4913         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4914         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4915         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4916         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4917         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4918         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4919         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4920         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4921         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4922         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4923         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4924         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4925         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4926         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4927         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4928         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4929         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4930         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4931         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4932         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4933         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4934         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4935         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4936         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4937         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4938         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4939         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4940         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4941         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4942         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4943         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4944         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4945         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4946         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4947         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4948         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4949         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4950         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4951         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4952         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4953         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4954         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4955         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4956         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4957         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4958         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4959         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4960         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4961         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4962         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4963         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4964 };
4965
4966 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4967         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4968         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4969         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4970         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4971         0x00000000
4972 };
4973
4974 #if 0 /* All zeros, don't eat up space with it. */
4975 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4976         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4977         0x00000000, 0x00000000, 0x00000000, 0x00000000
4978 };
4979 #endif
4980
4981 #define RX_CPU_SCRATCH_BASE     0x30000
4982 #define RX_CPU_SCRATCH_SIZE     0x04000
4983 #define TX_CPU_SCRATCH_BASE     0x34000
4984 #define TX_CPU_SCRATCH_SIZE     0x04000
4985
4986 /* tp->lock is held. */
4987 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4988 {
4989         int i;
4990
4991         BUG_ON(offset == TX_CPU_BASE &&
4992             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4993
4994         if (offset == RX_CPU_BASE) {
4995                 for (i = 0; i < 10000; i++) {
4996                         tw32(offset + CPU_STATE, 0xffffffff);
4997                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4998                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4999                                 break;
5000                 }
5001
5002                 tw32(offset + CPU_STATE, 0xffffffff);
5003                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5004                 udelay(10);
5005         } else {
5006                 for (i = 0; i < 10000; i++) {
5007                         tw32(offset + CPU_STATE, 0xffffffff);
5008                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5009                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5010                                 break;
5011                 }
5012         }
5013
5014         if (i >= 10000) {
5015                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5016                        "and %s CPU\n",
5017                        tp->dev->name,
5018                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5019                 return -ENODEV;
5020         }
5021
5022         /* Clear firmware's nvram arbitration. */
5023         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5024                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5025         return 0;
5026 }
5027
5028 struct fw_info {
5029         unsigned int text_base;
5030         unsigned int text_len;
5031         u32 *text_data;
5032         unsigned int rodata_base;
5033         unsigned int rodata_len;
5034         u32 *rodata_data;
5035         unsigned int data_base;
5036         unsigned int data_len;
5037         u32 *data_data;
5038 };
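
/* Note: the *_base fields hold the firmware's link addresses (the
 * 0x08000xxx values defined above); tg3_load_firmware_cpu() only uses
 * the low 16 bits of each as an offset into the CPU scratch memory,
 * hence the "& 0xffff" masking in its copy loops.
 */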
5039
5040 /* tp->lock is held. */
5041 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5042                                  int cpu_scratch_size, struct fw_info *info)
5043 {
5044         int err, lock_err, i;
5045         void (*write_op)(struct tg3 *, u32, u32);
5046
5047         if (cpu_base == TX_CPU_BASE &&
5048             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5049                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5050                        "TX cpu firmware on %s which is 5705.\n",
5051                        tp->dev->name);
5052                 return -EINVAL;
5053         }
5054
5055         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5056                 write_op = tg3_write_mem;
5057         else
5058                 write_op = tg3_write_indirect_reg32;
5059
5060         /* It is possible that bootcode is still loading at this point.
5061          * Get the nvram lock first before halting the cpu.
5062          * Get the nvram lock before halting the cpu.
5063         lock_err = tg3_nvram_lock(tp);
5064         err = tg3_halt_cpu(tp, cpu_base);
5065         if (!lock_err)
5066                 tg3_nvram_unlock(tp);
5067         if (err)
5068                 goto out;
5069
5070         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5071                 write_op(tp, cpu_scratch_base + i, 0);
5072         tw32(cpu_base + CPU_STATE, 0xffffffff);
5073         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5074         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5075                 write_op(tp, (cpu_scratch_base +
5076                               (info->text_base & 0xffff) +
5077                               (i * sizeof(u32))),
5078                          (info->text_data ?
5079                           info->text_data[i] : 0));
5080         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5081                 write_op(tp, (cpu_scratch_base +
5082                               (info->rodata_base & 0xffff) +
5083                               (i * sizeof(u32))),
5084                          (info->rodata_data ?
5085                           info->rodata_data[i] : 0));
5086         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5087                 write_op(tp, (cpu_scratch_base +
5088                               (info->data_base & 0xffff) +
5089                               (i * sizeof(u32))),
5090                          (info->data_data ?
5091                           info->data_data[i] : 0));
5092
5093         err = 0;
5094
5095 out:
5096         return err;
5097 }
5098
5099 /* tp->lock is held. */
5100 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5101 {
5102         struct fw_info info;
5103         int err, i;
5104
5105         info.text_base = TG3_FW_TEXT_ADDR;
5106         info.text_len = TG3_FW_TEXT_LEN;
5107         info.text_data = &tg3FwText[0];
5108         info.rodata_base = TG3_FW_RODATA_ADDR;
5109         info.rodata_len = TG3_FW_RODATA_LEN;
5110         info.rodata_data = &tg3FwRodata[0];
5111         info.data_base = TG3_FW_DATA_ADDR;
5112         info.data_len = TG3_FW_DATA_LEN;
5113         info.data_data = NULL;
5114
5115         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5116                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5117                                     &info);
5118         if (err)
5119                 return err;
5120
5121         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5122                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5123                                     &info);
5124         if (err)
5125                 return err;
5126
5127         /* Now start up only the RX cpu. */
5128         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5129         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5130
5131         for (i = 0; i < 5; i++) {
5132                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5133                         break;
5134                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5135                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5136                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5137                 udelay(1000);
5138         }
5139         if (i >= 5) {
5140                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
5141                        "to set RX CPU PC, is %08x should be %08x\n",
5142                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5143                        TG3_FW_TEXT_ADDR);
5144                 return -ENODEV;
5145         }
5146         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5147         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5148
5149         return 0;
5150 }
5151
5152 #if TG3_TSO_SUPPORT != 0
5153
5154 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5155 #define TG3_TSO_FW_RELEASE_MINOR        0x6
5156 #define TG3_TSO_FW_RELEASE_FIX          0x0
5157 #define TG3_TSO_FW_START_ADDR           0x08000000
5158 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5159 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5160 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5161 #define TG3_TSO_FW_RODATA_LEN           0x60
5162 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5163 #define TG3_TSO_FW_DATA_LEN             0x30
5164 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5165 #define TG3_TSO_FW_SBSS_LEN             0x2c
5166 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5167 #define TG3_TSO_FW_BSS_LEN              0x894
5168
5169 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5170         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5171         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5172         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5173         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5174         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5175         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5176         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5177         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5178         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5179         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5180         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5181         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5182         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5183         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5184         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5185         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5186         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5187         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5188         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5189         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5190         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5191         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5192         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5193         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5194         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5195         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5196         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5197         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5198         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5199         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5200         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5201         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5202         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5203         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5204         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5205         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5206         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5207         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5208         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5209         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5210         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5211         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5212         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5213         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5214         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5215         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5216         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5217         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5218         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5219         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5220         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5221         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5222         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5223         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5224         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5225         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5226         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5227         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5228         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5229         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5230         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5231         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5232         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5233         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5234         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5235         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5236         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5237         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5238         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5239         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5240         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5241         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5242         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5243         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5244         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5245         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5246         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5247         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5248         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5249         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5250         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5251         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5252         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5253         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5254         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5255         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5256         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5257         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5258         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5259         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5260         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5261         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5262         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5263         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5264         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5265         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5266         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5267         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5268         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5269         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5270         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5271         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5272         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5273         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5274         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5275         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5276         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5277         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5278         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5279         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5280         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5281         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5282         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5283         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5284         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5285         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5286         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5287         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5288         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5289         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5290         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5291         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5292         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5293         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5294         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5295         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5296         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5297         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5298         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5299         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5300         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5301         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5302         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5303         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5304         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5305         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5306         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5307         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5308         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5309         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5310         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5311         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5312         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5313         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5314         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5315         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5316         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5317         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5318         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5319         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5320         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5321         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5322         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5323         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5324         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5325         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5326         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5327         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5328         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5329         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5330         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5331         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5332         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5333         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5334         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5335         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5336         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5337         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5338         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5339         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5340         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5341         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5342         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5343         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5344         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5345         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5346         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5347         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5348         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5349         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5350         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5351         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5352         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5353         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5354         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5355         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5356         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5357         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5358         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5359         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5360         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5361         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5362         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5363         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5364         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5365         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5366         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5367         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5368         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5369         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5370         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5371         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5372         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5373         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5374         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5375         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5376         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5377         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5378         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5379         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5380         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5381         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5382         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5383         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5384         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5385         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5386         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5387         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5388         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5389         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5390         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5391         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5392         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5393         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5394         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5395         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5396         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5397         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5398         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5399         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5400         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5401         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5402         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5403         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5404         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5405         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5406         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5407         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5408         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5409         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5410         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5411         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5412         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5413         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5414         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5415         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5416         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5417         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5418         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5419         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5420         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5421         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5422         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5423         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5424         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5425         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5426         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5427         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5428         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5429         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5430         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5431         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5432         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5433         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5434         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5435         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5436         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5437         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5438         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5439         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5440         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5441         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5442         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5443         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5444         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5445         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5446         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5447         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5448         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5449         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5450         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5451         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5452         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5453         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5454 };
5455
5456 static u32 tg3TsoFwRodata[] = {
5457         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5458         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5459         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5460         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5461         0x00000000,
5462 };
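/* The rodata words above are ASCII tags from the TSO firmware image, packed
 * most-significant byte first ("MainCpuB", "MainCpuA", "stkoffldIn",
 * "SwEvent0", "fatalErr", ...).  A minimal userspace sketch for dumping
 * them; dump_fw_rodata() is a hypothetical helper, not part of the driver:
 */
#if 0
#include <stdio.h>

static void dump_fw_rodata(const unsigned int *words, int count)
{
	int i, shift;

	for (i = 0; i < count; i++)
		for (shift = 24; shift >= 0; shift -= 8) {
			int c = (words[i] >> shift) & 0xff;

			putchar(c ? c : '.');
		}
	putchar('\n');
}
#endif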
5463
5464 static u32 tg3TsoFwData[] = {
5465         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5466         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5467         0x00000000,
5468 };
5469
5470 /* 5705 needs a special version of the TSO firmware.  */
5471 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5472 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5473 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5474 #define TG3_TSO5_FW_START_ADDR          0x00010000
5475 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5476 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5477 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5478 #define TG3_TSO5_FW_RODATA_LEN          0x50
5479 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5480 #define TG3_TSO5_FW_DATA_LEN            0x20
5481 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5482 #define TG3_TSO5_FW_SBSS_LEN            0x28
5483 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5484 #define TG3_TSO5_FW_BSS_LEN             0x88
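/* Sketch of the resulting TSO5 image layout, derived from the values above
 * (the small gaps between sections presumably come from alignment in the
 * original firmware link):
 *
 *   0x00010000  .text    0xe90 bytes
 *   0x00010e90  .rodata  0x50  bytes
 *   0x00010f00  .data    0x20  bytes
 *   0x00010f20  .sbss    0x28  bytes
 *   0x00010f50  .bss     0x88  bytes
 */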
5485
5486 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5487         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5488         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5489         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5490         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5491         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5492         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5493         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5494         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5495         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5496         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5497         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5498         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5499         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5500         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5501         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5502         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5503         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5504         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5505         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5506         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5507         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5508         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5509         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5510         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5511         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5512         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5513         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5514         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5515         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5516         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5517         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5518         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5519         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5520         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5521         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5522         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5523         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5524         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5525         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5526         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5527         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5528         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5529         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5530         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5531         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5532         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5533         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5534         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5535         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5536         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5537         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5538         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5539         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5540         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5541         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5542         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5543         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5544         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5545         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5546         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5547         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5548         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5549         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5550         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5551         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5552         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5553         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5554         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5555         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5556         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5557         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5558         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5559         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5560         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5561         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5562         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5563         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5564         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5565         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5566         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5567         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5568         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5569         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5570         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5571         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5572         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5573         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5574         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5575         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5576         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5577         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5578         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5579         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5580         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5581         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5582         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5583         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5584         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5585         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5586         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5587         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5588         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5589         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5590         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5591         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5592         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5593         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5594         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5595         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5596         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5597         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5598         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5599         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5600         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5601         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5602         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5603         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5604         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5605         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5606         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5607         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5608         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5609         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5610         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5611         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5612         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5613         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5614         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5615         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5616         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5617         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5618         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5619         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5620         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5621         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5622         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5623         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5624         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5625         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5626         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5627         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5628         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5629         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5630         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5631         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5632         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5633         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5634         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5635         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5636         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5637         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5638         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5639         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5640         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5641         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5642         0x00000000, 0x00000000, 0x00000000,
5643 };
5644
5645 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5646         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5647         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5648         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5649         0x00000000, 0x00000000, 0x00000000,
5650 };
5651
5652 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5653         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5654         0x00000000, 0x00000000, 0x00000000,
5655 };
5656
5657 /* tp->lock is held. */
5658 static int tg3_load_tso_firmware(struct tg3 *tp)
5659 {
5660         struct fw_info info;
5661         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5662         int err, i;
5663
5664         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5665                 return 0;
5666
5667         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5668                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5669                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5670                 info.text_data = &tg3Tso5FwText[0];
5671                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5672                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5673                 info.rodata_data = &tg3Tso5FwRodata[0];
5674                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5675                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5676                 info.data_data = &tg3Tso5FwData[0];
5677                 cpu_base = RX_CPU_BASE;
5678                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5679                 cpu_scratch_size = (info.text_len +
5680                                     info.rodata_len +
5681                                     info.data_len +
5682                                     TG3_TSO5_FW_SBSS_LEN +
5683                                     TG3_TSO5_FW_BSS_LEN);
5684         } else {
5685                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5686                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5687                 info.text_data = &tg3TsoFwText[0];
5688                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5689                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5690                 info.rodata_data = &tg3TsoFwRodata[0];
5691                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5692                 info.data_len = TG3_TSO_FW_DATA_LEN;
5693                 info.data_data = &tg3TsoFwData[0];
5694                 cpu_base = TX_CPU_BASE;
5695                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5696                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5697         }
5698
5699         err = tg3_load_firmware_cpu(tp, cpu_base,
5700                                     cpu_scratch_base, cpu_scratch_size,
5701                                     &info);
5702         if (err)
5703                 return err;
5704
5705         /* Now start up the CPU. */
5706         tw32(cpu_base + CPU_STATE, 0xffffffff);
5707         tw32_f(cpu_base + CPU_PC,    info.text_base);
5708
5709         for (i = 0; i < 5; i++) {
5710                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5711                         break;
5712                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5713                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5714                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5715                 udelay(1000);
5716         }
5717         if (i >= 5) {
5718                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5719                        "CPU PC for %s: is %08x, should be %08x\n",
5720                        tp->dev->name, tr32(cpu_base + CPU_PC),
5721                        info.text_base);
5722                 return -ENODEV;
5723         }
5724         tw32(cpu_base + CPU_STATE, 0xffffffff);
5725         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5726         return 0;
5727 }
5728
5729 #endif /* TG3_TSO_SUPPORT != 0 */
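/* Note: tg3_load_tso_firmware() above runs with tp->lock held and is only
 * reached for chips that are TG3_FLG2_TSO_CAPABLE but lack TG3_FLG2_HW_TSO
 * (hardware-TSO chips return early without loading anything); see the
 * TG3_TSO_SUPPORT block in tg3_reset_hw() further down.
 */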
5730
5731 /* tp->lock is held. */
5732 static void __tg3_set_mac_addr(struct tg3 *tp)
5733 {
5734         u32 addr_high, addr_low;
5735         int i;
5736
5737         addr_high = ((tp->dev->dev_addr[0] << 8) |
5738                      tp->dev->dev_addr[1]);
5739         addr_low = ((tp->dev->dev_addr[2] << 24) |
5740                     (tp->dev->dev_addr[3] << 16) |
5741                     (tp->dev->dev_addr[4] <<  8) |
5742                     (tp->dev->dev_addr[5] <<  0));
5743         for (i = 0; i < 4; i++) {
5744                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5745                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5746         }
5747
5748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5749             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5750                 for (i = 0; i < 12; i++) {
5751                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5752                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5753                 }
5754         }
5755
5756         addr_high = (tp->dev->dev_addr[0] +
5757                      tp->dev->dev_addr[1] +
5758                      tp->dev->dev_addr[2] +
5759                      tp->dev->dev_addr[3] +
5760                      tp->dev->dev_addr[4] +
5761                      tp->dev->dev_addr[5]) &
5762                 TX_BACKOFF_SEED_MASK;
5763         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5764 }
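/* Worked example of the packing above (example_mac_pack() is a hypothetical
 * helper, not used by the driver): for a station address of
 * 00:10:18:aa:bb:cc the writes become addr_high = 0x00000010 and
 * addr_low = 0x18aabbcc.
 */
#if 0
static void example_mac_pack(const u8 *a, u32 *hi, u32 *lo)
{
	*hi = (a[0] << 8) | a[1];
	*lo = (a[2] << 24) | (a[3] << 16) | (a[4] << 8) | a[5];
}
#endif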
5765
5766 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5767 {
5768         struct tg3 *tp = netdev_priv(dev);
5769         struct sockaddr *addr = p;
5770
5771         if (!is_valid_ether_addr(addr->sa_data))
5772                 return -EINVAL;
5773
5774         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5775
5776         if (!netif_running(dev))
5777                 return 0;
5778
5779         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5780                 /* Reset chip so that ASF can re-init any MAC addresses it
5781                  * needs.
5782                  */
5783                 tg3_netif_stop(tp);
5784                 tg3_full_lock(tp, 1);
5785
5786                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5787                 tg3_init_hw(tp, 0);
5788
5789                 tg3_netif_start(tp);
5790                 tg3_full_unlock(tp);
5791         } else {
5792                 spin_lock_bh(&tp->lock);
5793                 __tg3_set_mac_addr(tp);
5794                 spin_unlock_bh(&tp->lock);
5795         }
5796
5797         return 0;
5798 }
5799
5800 /* tp->lock is held. */
5801 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5802                            dma_addr_t mapping, u32 maxlen_flags,
5803                            u32 nic_addr)
5804 {
5805         tg3_write_mem(tp,
5806                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5807                       ((u64) mapping >> 32));
5808         tg3_write_mem(tp,
5809                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5810                       ((u64) mapping & 0xffffffff));
5811         tg3_write_mem(tp,
5812                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5813                        maxlen_flags);
5814
5815         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5816                 tg3_write_mem(tp,
5817                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5818                               nic_addr);
5819 }
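/* Each TG3_BDINFO block written above is four 32-bit words in NIC SRAM:
 * the host DMA address (high word, then low word), then
 * (max buffer length << 16) | ring attribute flags, then the NIC SRAM
 * address of the descriptors (skipped on 5705-plus chips).  A sketch of a
 * call, mirroring the send-ring setup tg3_reset_hw() performs below:
 */
#if 0
static void example_send_ring_bdinfo(struct tg3 *tp)
{
	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
		       tp->tx_desc_mapping,
		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
		       NIC_SRAM_TX_BUFFER_DESC);
}
#endif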
5820
5821 static void __tg3_set_rx_mode(struct net_device *);
5822 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5823 {
5824         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5825         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5826         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5827         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5828         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5829                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5830                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5831         }
5832         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5833         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5834         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5835                 u32 val = ec->stats_block_coalesce_usecs;
5836
5837                 if (!netif_carrier_ok(tp->dev))
5838                         val = 0;
5839
5840                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5841         }
5842 }
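/* A sketch of the ethtool_coalesce parameters this helper consumes (the
 * values below are illustrative only; the driver picks its real defaults
 * elsewhere, and tg3_reset_hw() passes tp->coal under tp->lock):
 */
#if 0
static void example_coalesce_setup(struct tg3 *tp)
{
	struct ethtool_coalesce ec = {
		.rx_coalesce_usecs		= 20,
		.rx_max_coalesced_frames	= 5,
		.tx_coalesce_usecs		= 72,
		.tx_max_coalesced_frames	= 53,
		.stats_block_coalesce_usecs	= 1000,
	};

	__tg3_set_coalesce(tp, &ec);
}
#endif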
5843
5844 /* tp->lock is held. */
5845 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5846 {
5847         u32 val, rdmac_mode;
5848         int i, err, limit;
5849
5850         tg3_disable_ints(tp);
5851
5852         tg3_stop_fw(tp);
5853
5854         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5855
5856         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5857                 tg3_abort_hw(tp, 1);
5858         }
5859
5860         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5861                 tg3_phy_reset(tp);
5862
5863         err = tg3_chip_reset(tp);
5864         if (err)
5865                 return err;
5866
5867         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5868
5869         /* This works around an issue with Athlon chipsets on
5870          * B3 tigon3 silicon.  This bit has no effect on any
5871          * other revision.  But do not set this on PCI Express
5872          * chips.
5873          */
5874         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5875                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5876         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5877
5878         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5879             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5880                 val = tr32(TG3PCI_PCISTATE);
5881                 val |= PCISTATE_RETRY_SAME_DMA;
5882                 tw32(TG3PCI_PCISTATE, val);
5883         }
5884
5885         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5886                 /* Enable some hw fixes.  */
5887                 val = tr32(TG3PCI_MSI_DATA);
5888                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5889                 tw32(TG3PCI_MSI_DATA, val);
5890         }
5891
5892         /* Descriptor ring init may make accesses to the
5893          * NIC SRAM area to set up the TX descriptors, so we
5894          * can only do this after the hardware has been
5895          * successfully reset.
5896          */
5897         tg3_init_rings(tp);
5898
5899         /* This value is determined during the probe time DMA
5900          * engine test, tg3_test_dma.
5901          */
5902         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5903
5904         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5905                           GRC_MODE_4X_NIC_SEND_RINGS |
5906                           GRC_MODE_NO_TX_PHDR_CSUM |
5907                           GRC_MODE_NO_RX_PHDR_CSUM);
5908         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5909
5910         /* Pseudo-header checksum is done by hardware logic and not
5911          * the offload processors, so make the chip do the pseudo-
5912          * header checksums on receive.  For transmit it is more
5913          * convenient to do the pseudo-header checksum in software
5914          * as Linux does that on transmit for us in all cases.
5915          */
5916         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5917
5918         tw32(GRC_MODE,
5919              tp->grc_mode |
5920              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5921
5922         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
5923         val = tr32(GRC_MISC_CFG);
5924         val &= ~0xff;
5925         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5926         tw32(GRC_MISC_CFG, val);
5927
5928         /* Initialize MBUF/DESC pool. */
5929         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5930                 /* Do nothing.  */
5931         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5932                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5933                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5934                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5935                 else
5936                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5937                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5938                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5939         }
5940 #if TG3_TSO_SUPPORT != 0
5941         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5942                 int fw_len;
5943
5944                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5945                           TG3_TSO5_FW_RODATA_LEN +
5946                           TG3_TSO5_FW_DATA_LEN +
5947                           TG3_TSO5_FW_SBSS_LEN +
5948                           TG3_TSO5_FW_BSS_LEN);
5949                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
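                /* The rounding above pads the image to a 0x80-byte boundary:
                 * with the TSO5 section sizes this is 0xfb0 -> 0x1000 bytes
                 * reserved ahead of the MBUF pool.
                 */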
5950                 tw32(BUFMGR_MB_POOL_ADDR,
5951                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5952                 tw32(BUFMGR_MB_POOL_SIZE,
5953                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5954         }
5955 #endif
5956
5957         if (tp->dev->mtu <= ETH_DATA_LEN) {
5958                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5959                      tp->bufmgr_config.mbuf_read_dma_low_water);
5960                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5961                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5962                 tw32(BUFMGR_MB_HIGH_WATER,
5963                      tp->bufmgr_config.mbuf_high_water);
5964         } else {
5965                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5966                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5967                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5968                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5969                 tw32(BUFMGR_MB_HIGH_WATER,
5970                      tp->bufmgr_config.mbuf_high_water_jumbo);
5971         }
5972         tw32(BUFMGR_DMA_LOW_WATER,
5973              tp->bufmgr_config.dma_low_water);
5974         tw32(BUFMGR_DMA_HIGH_WATER,
5975              tp->bufmgr_config.dma_high_water);
5976
5977         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5978         for (i = 0; i < 2000; i++) {
5979                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5980                         break;
5981                 udelay(10);
5982         }
5983         if (i >= 2000) {
5984                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5985                        tp->dev->name);
5986                 return -ENODEV;
5987         }
5988
5989         /* Set up the replenish threshold. */
5990         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5991
5992         /* Initialize TG3_BDINFO's at:
5993          *  RCVDBDI_STD_BD:     standard eth size rx ring
5994          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5995          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5996          *
5997          * like so:
5998          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5999          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6000          *                              ring attribute flags
6001          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6002          *
6003          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6004          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6005          *
6006          * The size of each ring is fixed in the firmware, but the location is
6007          * configurable.
6008          */
6009         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6010              ((u64) tp->rx_std_mapping >> 32));
6011         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6012              ((u64) tp->rx_std_mapping & 0xffffffff));
6013         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6014              NIC_SRAM_RX_BUFFER_DESC);
6015
6016         /* Don't even try to program the JUMBO/MINI buffer descriptor
6017          * configs on 5705.
6018          */
6019         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6020                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6021                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6022         } else {
6023                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6024                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6025
6026                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6027                      BDINFO_FLAGS_DISABLED);
6028
6029                 /* Set up the replenish threshold. */
6030                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6031
6032                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6033                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6034                              ((u64) tp->rx_jumbo_mapping >> 32));
6035                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6036                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6037                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6038                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6039                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6040                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6041                 } else {
6042                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6043                              BDINFO_FLAGS_DISABLED);
6044                 }
6045
6046         }
6047
6048         /* There is only one send ring on 5705/5750, no need to explicitly
6049          * disable the others.
6050          */
6051         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6052                 /* Clear out send RCB ring in SRAM. */
6053                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6054                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6055                                       BDINFO_FLAGS_DISABLED);
6056         }
6057
6058         tp->tx_prod = 0;
6059         tp->tx_cons = 0;
6060         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6061         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6062
6063         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6064                        tp->tx_desc_mapping,
6065                        (TG3_TX_RING_SIZE <<
6066                         BDINFO_FLAGS_MAXLEN_SHIFT),
6067                        NIC_SRAM_TX_BUFFER_DESC);
6068
6069         /* There is only one receive return ring on 5705/5750, no need
6070          * to explicitly disable the others.
6071          */
6072         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6073                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6074                      i += TG3_BDINFO_SIZE) {
6075                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6076                                       BDINFO_FLAGS_DISABLED);
6077                 }
6078         }
6079
6080         tp->rx_rcb_ptr = 0;
6081         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6082
6083         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6084                        tp->rx_rcb_mapping,
6085                        (TG3_RX_RCB_RING_SIZE(tp) <<
6086                         BDINFO_FLAGS_MAXLEN_SHIFT),
6087                        0);
6088
6089         tp->rx_std_ptr = tp->rx_pending;
6090         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6091                      tp->rx_std_ptr);
6092
6093         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6094                                                 tp->rx_jumbo_pending : 0;
6095         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6096                      tp->rx_jumbo_ptr);
6097
6098         /* Initialize MAC address and backoff seed. */
6099         __tg3_set_mac_addr(tp);
6100
6101         /* MTU + ethernet header + FCS + optional VLAN tag */
6102         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
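        /* e.g. a standard 1500-byte MTU gives 1500 + 14 (header) + 4 (FCS)
         * + 4 (VLAN tag) = 1522 bytes.
         */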
6103
6104         /* The slot time is changed by tg3_setup_phy if we
6105          * run at gigabit with half duplex.
6106          */
6107         tw32(MAC_TX_LENGTHS,
6108              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6109              (6 << TX_LENGTHS_IPG_SHIFT) |
6110              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6111
6112         /* Receive rules. */
6113         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6114         tw32(RCVLPC_CONFIG, 0x0181);
6115
6116         /* Calculate the RDMAC_MODE setting early; we need it to determine
6117          * the RCVLPC_STATS_ENABLE mask.
6118          */
6119         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6120                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6121                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6122                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6123                       RDMAC_MODE_LNGREAD_ENAB);
6124         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6125                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6126
6127         /* If statement applies to 5705 and 5750 PCI devices only */
6128         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6129              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6130             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6131                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6132                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6133                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6134                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6135                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6136                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6137                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6138                 }
6139         }
6140
6141         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6142                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6143
6144 #if TG3_TSO_SUPPORT != 0
6145         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6146                 rdmac_mode |= (1 << 27);
6147 #endif
6148
6149         /* Receive/send statistics. */
6150         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6151             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6152                 val = tr32(RCVLPC_STATS_ENABLE);
6153                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6154                 tw32(RCVLPC_STATS_ENABLE, val);
6155         } else {
6156                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6157         }
6158         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6159         tw32(SNDDATAI_STATSENAB, 0xffffff);
6160         tw32(SNDDATAI_STATSCTRL,
6161              (SNDDATAI_SCTRL_ENABLE |
6162               SNDDATAI_SCTRL_FASTUPD));
6163
6164         /* Set up the host coalescing engine. */
6165         tw32(HOSTCC_MODE, 0);
6166         for (i = 0; i < 2000; i++) {
6167                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6168                         break;
6169                 udelay(10);
6170         }
6171
6172         __tg3_set_coalesce(tp, &tp->coal);
6173
6174         /* set status block DMA address */
6175         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6176              ((u64) tp->status_mapping >> 32));
6177         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6178              ((u64) tp->status_mapping & 0xffffffff));
6179
6180         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6181                 /* Status/statistics block address.  See tg3_timer,
6182                  * the tg3_periodic_fetch_stats call there, and
6183                  * tg3_get_stats to see how this works for 5705/5750 chips.
6184                  */
6185                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6186                      ((u64) tp->stats_mapping >> 32));
6187                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6188                      ((u64) tp->stats_mapping & 0xffffffff));
6189                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6190                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6191         }
6192
6193         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6194
6195         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6196         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6197         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6198                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6199
6200         /* Clear statistics/status block in chip, and status block in ram. */
6201         for (i = NIC_SRAM_STATS_BLK;
6202              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6203              i += sizeof(u32)) {
6204                 tg3_write_mem(tp, i, 0);
6205                 udelay(40);
6206         }
6207         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6208
6209         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6210                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6211                 /* reset to prevent losing 1st rx packet intermittently */
6212                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6213                 udelay(10);
6214         }
6215
6216         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6217                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6218         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6219         udelay(40);
6220
6221         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6222          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6223          * register to preserve the GPIO settings for LOMs. The GPIOs,
6224          * whether used as inputs or outputs, are set by boot code after
6225          * reset.
6226          */
6227         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6228                 u32 gpio_mask;
6229
6230                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6231                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6232
6233                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6234                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6235                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6236
6237                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6238                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6239
6240                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6241
6242                 /* GPIO1 must be driven high for eeprom write protect */
6243                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6244                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6245         }
6246         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6247         udelay(100);
6248
6249         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6250         tp->last_tag = 0;
6251
6252         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6253                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6254                 udelay(40);
6255         }
6256
6257         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6258                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6259                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6260                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6261                WDMAC_MODE_LNGREAD_ENAB);
6262
6263         /* If statement applies to 5705 and 5750 PCI devices only */
6264         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6265              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6266             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6267                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6268                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6269                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6270                         /* nothing */
6271                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6272                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6273                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6274                         val |= WDMAC_MODE_RX_ACCEL;
6275                 }
6276         }
6277
6278         /* Enable host coalescing bug fix */
6279         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6280             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6281                 val |= (1 << 29);
6282
6283         tw32_f(WDMAC_MODE, val);
6284         udelay(40);
6285
6286         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6287                 val = tr32(TG3PCI_X_CAPS);
6288                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6289                         val &= ~PCIX_CAPS_BURST_MASK;
6290                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6291                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6292                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6293                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6294                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6295                                 val |= (tp->split_mode_max_reqs <<
6296                                         PCIX_CAPS_SPLIT_SHIFT);
6297                 }
6298                 tw32(TG3PCI_X_CAPS, val);
6299         }
6300
6301         tw32_f(RDMAC_MODE, rdmac_mode);
6302         udelay(40);
6303
6304         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6305         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6306                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6307         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6308         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6309         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6310         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6311         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6312 #if TG3_TSO_SUPPORT != 0
6313         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6314                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6315 #endif
6316         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6317         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6318
6319         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6320                 err = tg3_load_5701_a0_firmware_fix(tp);
6321                 if (err)
6322                         return err;
6323         }
6324
6325 #if TG3_TSO_SUPPORT != 0
6326         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6327                 err = tg3_load_tso_firmware(tp);
6328                 if (err)
6329                         return err;
6330         }
6331 #endif
6332
6333         tp->tx_mode = TX_MODE_ENABLE;
6334         tw32_f(MAC_TX_MODE, tp->tx_mode);
6335         udelay(100);
6336
6337         tp->rx_mode = RX_MODE_ENABLE;
6338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6339                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6340
6341         tw32_f(MAC_RX_MODE, tp->rx_mode);
6342         udelay(10);
6343
6344         if (tp->link_config.phy_is_low_power) {
6345                 tp->link_config.phy_is_low_power = 0;
6346                 tp->link_config.speed = tp->link_config.orig_speed;
6347                 tp->link_config.duplex = tp->link_config.orig_duplex;
6348                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6349         }
6350
6351         tp->mi_mode = MAC_MI_MODE_BASE;
6352         tw32_f(MAC_MI_MODE, tp->mi_mode);
6353         udelay(80);
6354
6355         tw32(MAC_LED_CTRL, tp->led_ctrl);
6356
6357         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6358         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6359                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6360                 udelay(10);
6361         }
6362         tw32_f(MAC_RX_MODE, tp->rx_mode);
6363         udelay(10);
6364
6365         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6366                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6367                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6368                         /* Set drive transmission level to 1.2V  */
6369                         /* only if the signal pre-emphasis bit is not set  */
6370                         val = tr32(MAC_SERDES_CFG);
6371                         val &= 0xfffff000;
6372                         val |= 0x880;
6373                         tw32(MAC_SERDES_CFG, val);
6374                 }
6375                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6376                         tw32(MAC_SERDES_CFG, 0x616000);
6377         }
6378
6379         /* Prevent chip from dropping frames when flow control
6380          * is enabled.
6381          */
6382         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6383
6384         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6385             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6386                 /* Use hardware link auto-negotiation */
6387                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6388         }
6389
6390         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6391             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6392                 u32 tmp;
6393
6394                 tmp = tr32(SERDES_RX_CTRL);
6395                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6396                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6397                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6398                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6399         }
6400
6401         err = tg3_setup_phy(tp, reset_phy);
6402         if (err)
6403                 return err;
6404
6405         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6406                 u32 tmp;
6407
6408                 /* Clear CRC stats. */
6409                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6410                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6411                         tg3_readphy(tp, 0x14, &tmp);
6412                 }
6413         }
6414
6415         __tg3_set_rx_mode(tp->dev);
6416
6417         /* Initialize receive rules. */
6418         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6419         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6420         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6421         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6422
6423         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6424             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6425                 limit = 8;
6426         else
6427                 limit = 16;
6428         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6429                 limit -= 4;
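             /* This switch intentionally falls through: it clears receive
              * rules 4 through (limit - 1) and leaves rules at or above the
              * limit untouched.  When ASF is enabled the limit is reduced by
              * four, presumably so the firmware keeps the top four rules for
              * itself.
              */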
6430         switch (limit) {
6431         case 16:
6432                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6433         case 15:
6434                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6435         case 14:
6436                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6437         case 13:
6438                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6439         case 12:
6440                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6441         case 11:
6442                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6443         case 10:
6444                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6445         case 9:
6446                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6447         case 8:
6448                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6449         case 7:
6450                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6451         case 6:
6452                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6453         case 5:
6454                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6455         case 4:
6456                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6457         case 3:
6458                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6459         case 2:
6460         case 1:
6461
6462         default:
6463                 break;
6464         }
6465
6466         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6467
6468         return 0;
6469 }
6470
6471 /* Called at device open time to get the chip ready for
6472  * packet processing.  Invoked with tp->lock held.
6473  */
6474 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6475 {
6476         int err;
6477
6478         /* Force the chip into D0. */
6479         err = tg3_set_power_state(tp, PCI_D0);
6480         if (err)
6481                 goto out;
6482
6483         tg3_switch_clocks(tp);
6484
6485         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6486
6487         err = tg3_reset_hw(tp, reset_phy);
6488
6489 out:
6490         return err;
6491 }
6492
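/* Add a 32-bit hardware counter register to a 64-bit (high/low) software
 * counter.  The register value is added to the low word; if the low word
 * ends up smaller than the value just added it wrapped, so the high word
 * is bumped by one.
 */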
6493 #define TG3_STAT_ADD32(PSTAT, REG) \
6494 do {    u32 __val = tr32(REG); \
6495         (PSTAT)->low += __val; \
6496         if ((PSTAT)->low < __val) \
6497                 (PSTAT)->high += 1; \
6498 } while (0)
6499
6500 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6501 {
6502         struct tg3_hw_stats *sp = tp->hw_stats;
6503
6504         if (!netif_carrier_ok(tp->dev))
6505                 return;
6506
6507         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6508         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6509         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6510         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6511         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6512         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6513         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6514         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6515         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6516         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6517         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6518         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6519         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6520
6521         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6522         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6523         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6524         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6525         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6526         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6527         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6528         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6529         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6530         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6531         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6532         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6533         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6534         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6535
6536         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6537         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6538         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6539 }
6540
6541 static void tg3_timer(unsigned long __opaque)
6542 {
6543         struct tg3 *tp = (struct tg3 *) __opaque;
6544
6545         if (tp->irq_sync)
6546                 goto restart_timer;
6547
6548         spin_lock(&tp->lock);
6549
6550         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6551                 /* All of this garbage is because, when using non-tagged
6552                  * IRQ status, the mailbox/status_block protocol the chip
6553                  * uses with the CPU is race prone.
6554                  */
6555                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6556                         tw32(GRC_LOCAL_CTRL,
6557                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6558                 } else {
6559                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6560                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6561                 }
6562
6563                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6564                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6565                         spin_unlock(&tp->lock);
6566                         schedule_work(&tp->reset_task);
6567                         return;
6568                 }
6569         }
6570
6571         /* This part only runs once per second. */
6572         if (!--tp->timer_counter) {
6573                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6574                         tg3_periodic_fetch_stats(tp);
6575
6576                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6577                         u32 mac_stat;
6578                         int phy_event;
6579
6580                         mac_stat = tr32(MAC_STATUS);
6581
6582                         phy_event = 0;
6583                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6584                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6585                                         phy_event = 1;
6586                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6587                                 phy_event = 1;
6588
6589                         if (phy_event)
6590                                 tg3_setup_phy(tp, 0);
6591                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6592                         u32 mac_stat = tr32(MAC_STATUS);
6593                         int need_setup = 0;
6594
6595                         if (netif_carrier_ok(tp->dev) &&
6596                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6597                                 need_setup = 1;
6598                         }
6599                         if (!netif_carrier_ok(tp->dev) &&
6600                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6601                                          MAC_STATUS_SIGNAL_DET))) {
6602                                 need_setup = 1;
6603                         }
6604                         if (need_setup) {
6605                                 tw32_f(MAC_MODE,
6606                                      (tp->mac_mode &
6607                                       ~MAC_MODE_PORT_MODE_MASK));
6608                                 udelay(40);
6609                                 tw32_f(MAC_MODE, tp->mac_mode);
6610                                 udelay(40);
6611                                 tg3_setup_phy(tp, 0);
6612                         }
6613                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6614                         tg3_serdes_parallel_detect(tp);
6615
6616                 tp->timer_counter = tp->timer_multiplier;
6617         }
6618
6619         /* Heartbeat is only sent once every 2 seconds.  */
6620         if (!--tp->asf_counter) {
6621                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6622                         u32 val;
6623
6624                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6625                                       FWCMD_NICDRV_ALIVE2);
6626                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6627                         /* 5 seconds timeout */
6628                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6629                         val = tr32(GRC_RX_CPU_EVENT);
6630                         val |= (1 << 14);
6631                         tw32(GRC_RX_CPU_EVENT, val);
6632                 }
6633                 tp->asf_counter = tp->asf_multiplier;
6634         }
6635
6636         spin_unlock(&tp->lock);
6637
6638 restart_timer:
6639         tp->timer.expires = jiffies + tp->timer_offset;
6640         add_timer(&tp->timer);
6641 }
6642
6643 static int tg3_request_irq(struct tg3 *tp)
6644 {
6645         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6646         unsigned long flags;
6647         struct net_device *dev = tp->dev;
6648
6649         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6650                 fn = tg3_msi;
6651                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6652                         fn = tg3_msi_1shot;
6653                 flags = SA_SAMPLE_RANDOM;
6654         } else {
6655                 fn = tg3_interrupt;
6656                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6657                         fn = tg3_interrupt_tagged;
6658                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6659         }
6660         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6661 }
6662
6663 static int tg3_test_interrupt(struct tg3 *tp)
6664 {
6665         struct net_device *dev = tp->dev;
6666         int err, i;
6667         u32 int_mbox = 0;
6668
6669         if (!netif_running(dev))
6670                 return -ENODEV;
6671
6672         tg3_disable_ints(tp);
6673
6674         free_irq(tp->pdev->irq, dev);
6675
6676         err = request_irq(tp->pdev->irq, tg3_test_isr,
6677                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6678         if (err)
6679                 return err;
6680
6681         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6682         tg3_enable_ints(tp);
6683
6684         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6685                HOSTCC_MODE_NOW);
6686
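             /* The HOSTCC_MODE_NOW write above forces an immediate host
              * coalescing event, which should raise an interrupt.  Poll the
              * interrupt mailbox for up to ~50ms (5 x 10ms); a non-zero value
              * is taken as proof that the interrupt was delivered.
              */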
6687         for (i = 0; i < 5; i++) {
6688                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6689                                         TG3_64BIT_REG_LOW);
6690                 if (int_mbox != 0)
6691                         break;
6692                 msleep(10);
6693         }
6694
6695         tg3_disable_ints(tp);
6696
6697         free_irq(tp->pdev->irq, dev);
6698         
6699         err = tg3_request_irq(tp);
6700
6701         if (err)
6702                 return err;
6703
6704         if (int_mbox != 0)
6705                 return 0;
6706
6707         return -EIO;
6708 }
6709
6710 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6711  * successfully restored.
6712  */
6713 static int tg3_test_msi(struct tg3 *tp)
6714 {
6715         struct net_device *dev = tp->dev;
6716         int err;
6717         u16 pci_cmd;
6718
6719         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6720                 return 0;
6721
6722         /* Turn off SERR reporting in case MSI terminates with Master
6723          * Abort.
6724          */
6725         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6726         pci_write_config_word(tp->pdev, PCI_COMMAND,
6727                               pci_cmd & ~PCI_COMMAND_SERR);
6728
6729         err = tg3_test_interrupt(tp);
6730
6731         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6732
6733         if (!err)
6734                 return 0;
6735
6736         /* other failures */
6737         if (err != -EIO)
6738                 return err;
6739
6740         /* MSI test failed, go back to INTx mode */
6741         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6742                "switching to INTx mode. Please report this failure to "
6743                "the PCI maintainer and include system chipset information.\n",
6744                        tp->dev->name);
6745
6746         free_irq(tp->pdev->irq, dev);
6747         pci_disable_msi(tp->pdev);
6748
6749         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6750
6751         err = tg3_request_irq(tp);
6752         if (err)
6753                 return err;
6754
6755         /* Need to reset the chip because the MSI cycle may have terminated
6756          * with Master Abort.
6757          */
6758         tg3_full_lock(tp, 1);
6759
6760         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6761         err = tg3_init_hw(tp, 1);
6762
6763         tg3_full_unlock(tp);
6764
6765         if (err)
6766                 free_irq(tp->pdev->irq, dev);
6767
6768         return err;
6769 }
6770
6771 static int tg3_open(struct net_device *dev)
6772 {
6773         struct tg3 *tp = netdev_priv(dev);
6774         int err;
6775
6776         tg3_full_lock(tp, 0);
6777
6778         err = tg3_set_power_state(tp, PCI_D0);
6779         if (err)
6780         if (err) {
                     tg3_full_unlock(tp);
6781                 return err;
             }
6782         tg3_disable_ints(tp);
6783         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6784
6785         tg3_full_unlock(tp);
6786
6787         /* The placement of this call is tied
6788          * to the setup and use of Host TX descriptors.
6789          */
6790         err = tg3_alloc_consistent(tp);
6791         if (err)
6792                 return err;
6793
6794         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6795             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6796             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6797             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6798               (tp->pdev_peer == tp->pdev))) {
6799                 /* All MSI supporting chips should support tagged
6800                  * status.  Assert that this is the case.
6801                  */
6802                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6803                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6804                                "Not using MSI.\n", tp->dev->name);
6805                 } else if (pci_enable_msi(tp->pdev) == 0) {
6806                         u32 msi_mode;
6807
6808                         msi_mode = tr32(MSGINT_MODE);
6809                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6810                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6811                 }
6812         }
6813         err = tg3_request_irq(tp);
6814
6815         if (err) {
6816                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6817                         pci_disable_msi(tp->pdev);
6818                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6819                 }
6820                 tg3_free_consistent(tp);
6821                 return err;
6822         }
6823
6824         tg3_full_lock(tp, 0);
6825
6826         err = tg3_init_hw(tp, 1);
6827         if (err) {
6828                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6829                 tg3_free_rings(tp);
6830         } else {
6831                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6832                         tp->timer_offset = HZ;
6833                 else
6834                         tp->timer_offset = HZ / 10;
6835
6836                 BUG_ON(tp->timer_offset > HZ);
6837                 tp->timer_counter = tp->timer_multiplier =
6838                         (HZ / tp->timer_offset);
6839                 tp->asf_counter = tp->asf_multiplier =
6840                         ((HZ / tp->timer_offset) * 2);
6841
6842                 init_timer(&tp->timer);
6843                 tp->timer.expires = jiffies + tp->timer_offset;
6844                 tp->timer.data = (unsigned long) tp;
6845                 tp->timer.function = tg3_timer;
6846         }
6847
6848         tg3_full_unlock(tp);
6849
6850         if (err) {
6851                 free_irq(tp->pdev->irq, dev);
6852                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6853                         pci_disable_msi(tp->pdev);
6854                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6855                 }
6856                 tg3_free_consistent(tp);
6857                 return err;
6858         }
6859
6860         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6861                 err = tg3_test_msi(tp);
6862
6863                 if (err) {
6864                         tg3_full_lock(tp, 0);
6865
6866                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6867                                 pci_disable_msi(tp->pdev);
6868                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6869                         }
6870                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6871                         tg3_free_rings(tp);
6872                         tg3_free_consistent(tp);
6873
6874                         tg3_full_unlock(tp);
6875
6876                         return err;
6877                 }
6878
6879                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6880                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6881                                 u32 val = tr32(0x7c04);
6882
6883                                 tw32(0x7c04, val | (1 << 29));
6884                         }
6885                 }
6886         }
6887
6888         tg3_full_lock(tp, 0);
6889
6890         add_timer(&tp->timer);
6891         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6892         tg3_enable_ints(tp);
6893
6894         tg3_full_unlock(tp);
6895
6896         netif_start_queue(dev);
6897
6898         return 0;
6899 }
6900
6901 #if 0
6902 /*static*/ void tg3_dump_state(struct tg3 *tp)
6903 {
6904         u32 val32, val32_2, val32_3, val32_4, val32_5;
6905         u16 val16;
6906         int i;
6907
6908         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6909         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6910         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6911                val16, val32);
6912
6913         /* MAC block */
6914         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6915                tr32(MAC_MODE), tr32(MAC_STATUS));
6916         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6917                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6918         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6919                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6920         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6921                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6922
6923         /* Send data initiator control block */
6924         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6925                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6926         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6927                tr32(SNDDATAI_STATSCTRL));
6928
6929         /* Send data completion control block */
6930         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6931
6932         /* Send BD ring selector block */
6933         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6934                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6935
6936         /* Send BD initiator control block */
6937         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6938                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6939
6940         /* Send BD completion control block */
6941         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6942
6943         /* Receive list placement control block */
6944         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6945                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6946         printk("       RCVLPC_STATSCTRL[%08x]\n",
6947                tr32(RCVLPC_STATSCTRL));
6948
6949         /* Receive data and receive BD initiator control block */
6950         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6951                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6952
6953         /* Receive data completion control block */
6954         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6955                tr32(RCVDCC_MODE));
6956
6957         /* Receive BD initiator control block */
6958         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6959                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6960
6961         /* Receive BD completion control block */
6962         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6963                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6964
6965         /* Receive list selector control block */
6966         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6967                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6968
6969         /* Mbuf cluster free block */
6970         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6971                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6972
6973         /* Host coalescing control block */
6974         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6975                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6976         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6977                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6978                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6979         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6980                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6981                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6982         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6983                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6984         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6985                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6986
6987         /* Memory arbiter control block */
6988         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6989                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6990
6991         /* Buffer manager control block */
6992         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6993                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6994         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6995                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6996         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6997                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6998                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6999                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7000
7001         /* Read DMA control block */
7002         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7003                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7004
7005         /* Write DMA control block */
7006         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7007                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7008
7009         /* DMA completion block */
7010         printk("DEBUG: DMAC_MODE[%08x]\n",
7011                tr32(DMAC_MODE));
7012
7013         /* GRC block */
7014         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7015                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7016         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7017                tr32(GRC_LOCAL_CTRL));
7018
7019         /* TG3_BDINFOs */
7020         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7021                tr32(RCVDBDI_JUMBO_BD + 0x0),
7022                tr32(RCVDBDI_JUMBO_BD + 0x4),
7023                tr32(RCVDBDI_JUMBO_BD + 0x8),
7024                tr32(RCVDBDI_JUMBO_BD + 0xc));
7025         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7026                tr32(RCVDBDI_STD_BD + 0x0),
7027                tr32(RCVDBDI_STD_BD + 0x4),
7028                tr32(RCVDBDI_STD_BD + 0x8),
7029                tr32(RCVDBDI_STD_BD + 0xc));
7030         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7031                tr32(RCVDBDI_MINI_BD + 0x0),
7032                tr32(RCVDBDI_MINI_BD + 0x4),
7033                tr32(RCVDBDI_MINI_BD + 0x8),
7034                tr32(RCVDBDI_MINI_BD + 0xc));
7035
7036         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7037         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7038         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7039         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7040         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7041                val32, val32_2, val32_3, val32_4);
7042
7043         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7044         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7045         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7046         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7047         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7048                val32, val32_2, val32_3, val32_4);
7049
7050         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7051         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7052         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7053         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7054         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7055         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7056                val32, val32_2, val32_3, val32_4, val32_5);
7057
7058         /* SW status block */
7059         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7060                tp->hw_status->status,
7061                tp->hw_status->status_tag,
7062                tp->hw_status->rx_jumbo_consumer,
7063                tp->hw_status->rx_consumer,
7064                tp->hw_status->rx_mini_consumer,
7065                tp->hw_status->idx[0].rx_producer,
7066                tp->hw_status->idx[0].tx_consumer);
7067
7068         /* SW statistics block */
7069         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7070                ((u32 *)tp->hw_stats)[0],
7071                ((u32 *)tp->hw_stats)[1],
7072                ((u32 *)tp->hw_stats)[2],
7073                ((u32 *)tp->hw_stats)[3]);
7074
7075         /* Mailboxes */
7076         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7077                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7078                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7079                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7080                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7081
7082         /* NIC side send descriptors. */
7083         for (i = 0; i < 6; i++) {
7084                 unsigned long txd;
7085
7086                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7087                         + (i * sizeof(struct tg3_tx_buffer_desc));
7088                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7089                        i,
7090                        readl(txd + 0x0), readl(txd + 0x4),
7091                        readl(txd + 0x8), readl(txd + 0xc));
7092         }
7093
7094         /* NIC side RX descriptors. */
7095         for (i = 0; i < 6; i++) {
7096                 unsigned long rxd;
7097
7098                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7099                         + (i * sizeof(struct tg3_rx_buffer_desc));
7100                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7101                        i,
7102                        readl(rxd + 0x0), readl(rxd + 0x4),
7103                        readl(rxd + 0x8), readl(rxd + 0xc));
7104                 rxd += (4 * sizeof(u32));
7105                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7106                        i,
7107                        readl(rxd + 0x0), readl(rxd + 0x4),
7108                        readl(rxd + 0x8), readl(rxd + 0xc));
7109         }
7110
7111         for (i = 0; i < 6; i++) {
7112                 unsigned long rxd;
7113
7114                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7115                         + (i * sizeof(struct tg3_rx_buffer_desc));
7116                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7117                        i,
7118                        readl(rxd + 0x0), readl(rxd + 0x4),
7119                        readl(rxd + 0x8), readl(rxd + 0xc));
7120                 rxd += (4 * sizeof(u32));
7121                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7122                        i,
7123                        readl(rxd + 0x0), readl(rxd + 0x4),
7124                        readl(rxd + 0x8), readl(rxd + 0xc));
7125         }
7126 }
7127 #endif
7128
7129 static struct net_device_stats *tg3_get_stats(struct net_device *);
7130 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7131
7132 static int tg3_close(struct net_device *dev)
7133 {
7134         struct tg3 *tp = netdev_priv(dev);
7135
7136         /* Calling flush_scheduled_work() may deadlock because
7137          * linkwatch_event() may be on the workqueue and it will try to get
7138          * the rtnl_lock which we are holding.
7139          */
7140         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7141                 msleep(1);
7142
7143         netif_stop_queue(dev);
7144
7145         del_timer_sync(&tp->timer);
7146
7147         tg3_full_lock(tp, 1);
7148 #if 0
7149         tg3_dump_state(tp);
7150 #endif
7151
7152         tg3_disable_ints(tp);
7153
7154         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7155         tg3_free_rings(tp);
7156         tp->tg3_flags &=
7157                 ~(TG3_FLAG_INIT_COMPLETE |
7158                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7159
7160         tg3_full_unlock(tp);
7161
7162         free_irq(tp->pdev->irq, dev);
7163         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7164                 pci_disable_msi(tp->pdev);
7165                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7166         }
7167
7168         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7169                sizeof(tp->net_stats_prev));
7170         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7171                sizeof(tp->estats_prev));
7172
7173         tg3_free_consistent(tp);
7174
7175         tg3_set_power_state(tp, PCI_D3hot);
7176
7177         netif_carrier_off(tp->dev);
7178
7179         return 0;
7180 }
7181
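/* Collapse a 64-bit (high/low) hardware counter into an unsigned long.
 * On 32-bit hosts only the low word fits, so the returned value simply
 * wraps at 2^32 there.
 */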
7182 static inline unsigned long get_stat64(tg3_stat64_t *val)
7183 {
7184         unsigned long ret;
7185
7186 #if (BITS_PER_LONG == 32)
7187         ret = val->low;
7188 #else
7189         ret = ((u64)val->high << 32) | ((u64)val->low);
7190 #endif
7191         return ret;
7192 }
7193
7194 static unsigned long calc_crc_errors(struct tg3 *tp)
7195 {
7196         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7197
7198         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7199             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7200              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7201                 u32 val;
7202
7203                 spin_lock_bh(&tp->lock);
7204                 if (!tg3_readphy(tp, 0x1e, &val)) {
7205                         tg3_writephy(tp, 0x1e, val | 0x8000);
7206                         tg3_readphy(tp, 0x14, &val);
7207                 } else
7208                         val = 0;
7209                 spin_unlock_bh(&tp->lock);
7210
7211                 tp->phy_crc_errors += val;
7212
7213                 return tp->phy_crc_errors;
7214         }
7215
7216         return get_stat64(&hw_stats->rx_fcs_errors);
7217 }
7218
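/* Each ethtool counter is the snapshot saved at the last close
 * (estats_prev) plus the live hardware counter, so the values keep
 * growing across down/up cycles.  For example, ESTAT_ADD(rx_octets)
 * expands to:
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 */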
7219 #define ESTAT_ADD(member) \
7220         estats->member =        old_estats->member + \
7221                                 get_stat64(&hw_stats->member)
7222
7223 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7224 {
7225         struct tg3_ethtool_stats *estats = &tp->estats;
7226         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7227         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7228
7229         if (!hw_stats)
7230                 return old_estats;
7231
7232         ESTAT_ADD(rx_octets);
7233         ESTAT_ADD(rx_fragments);
7234         ESTAT_ADD(rx_ucast_packets);
7235         ESTAT_ADD(rx_mcast_packets);
7236         ESTAT_ADD(rx_bcast_packets);
7237         ESTAT_ADD(rx_fcs_errors);
7238         ESTAT_ADD(rx_align_errors);
7239         ESTAT_ADD(rx_xon_pause_rcvd);
7240         ESTAT_ADD(rx_xoff_pause_rcvd);
7241         ESTAT_ADD(rx_mac_ctrl_rcvd);
7242         ESTAT_ADD(rx_xoff_entered);
7243         ESTAT_ADD(rx_frame_too_long_errors);
7244         ESTAT_ADD(rx_jabbers);
7245         ESTAT_ADD(rx_undersize_packets);
7246         ESTAT_ADD(rx_in_length_errors);
7247         ESTAT_ADD(rx_out_length_errors);
7248         ESTAT_ADD(rx_64_or_less_octet_packets);
7249         ESTAT_ADD(rx_65_to_127_octet_packets);
7250         ESTAT_ADD(rx_128_to_255_octet_packets);
7251         ESTAT_ADD(rx_256_to_511_octet_packets);
7252         ESTAT_ADD(rx_512_to_1023_octet_packets);
7253         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7254         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7255         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7256         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7257         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7258
7259         ESTAT_ADD(tx_octets);
7260         ESTAT_ADD(tx_collisions);
7261         ESTAT_ADD(tx_xon_sent);
7262         ESTAT_ADD(tx_xoff_sent);
7263         ESTAT_ADD(tx_flow_control);
7264         ESTAT_ADD(tx_mac_errors);
7265         ESTAT_ADD(tx_single_collisions);
7266         ESTAT_ADD(tx_mult_collisions);
7267         ESTAT_ADD(tx_deferred);
7268         ESTAT_ADD(tx_excessive_collisions);
7269         ESTAT_ADD(tx_late_collisions);
7270         ESTAT_ADD(tx_collide_2times);
7271         ESTAT_ADD(tx_collide_3times);
7272         ESTAT_ADD(tx_collide_4times);
7273         ESTAT_ADD(tx_collide_5times);
7274         ESTAT_ADD(tx_collide_6times);
7275         ESTAT_ADD(tx_collide_7times);
7276         ESTAT_ADD(tx_collide_8times);
7277         ESTAT_ADD(tx_collide_9times);
7278         ESTAT_ADD(tx_collide_10times);
7279         ESTAT_ADD(tx_collide_11times);
7280         ESTAT_ADD(tx_collide_12times);
7281         ESTAT_ADD(tx_collide_13times);
7282         ESTAT_ADD(tx_collide_14times);
7283         ESTAT_ADD(tx_collide_15times);
7284         ESTAT_ADD(tx_ucast_packets);
7285         ESTAT_ADD(tx_mcast_packets);
7286         ESTAT_ADD(tx_bcast_packets);
7287         ESTAT_ADD(tx_carrier_sense_errors);
7288         ESTAT_ADD(tx_discards);
7289         ESTAT_ADD(tx_errors);
7290
7291         ESTAT_ADD(dma_writeq_full);
7292         ESTAT_ADD(dma_write_prioq_full);
7293         ESTAT_ADD(rxbds_empty);
7294         ESTAT_ADD(rx_discards);
7295         ESTAT_ADD(rx_errors);
7296         ESTAT_ADD(rx_threshold_hit);
7297
7298         ESTAT_ADD(dma_readq_full);
7299         ESTAT_ADD(dma_read_prioq_full);
7300         ESTAT_ADD(tx_comp_queue_full);
7301
7302         ESTAT_ADD(ring_set_send_prod_index);
7303         ESTAT_ADD(ring_status_update);
7304         ESTAT_ADD(nic_irqs);
7305         ESTAT_ADD(nic_avoided_irqs);
7306         ESTAT_ADD(nic_tx_threshold_hit);
7307
7308         return estats;
7309 }
7310
7311 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7312 {
7313         struct tg3 *tp = netdev_priv(dev);
7314         struct net_device_stats *stats = &tp->net_stats;
7315         struct net_device_stats *old_stats = &tp->net_stats_prev;
7316         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7317
7318         if (!hw_stats)
7319                 return old_stats;
7320
7321         stats->rx_packets = old_stats->rx_packets +
7322                 get_stat64(&hw_stats->rx_ucast_packets) +
7323                 get_stat64(&hw_stats->rx_mcast_packets) +
7324                 get_stat64(&hw_stats->rx_bcast_packets);
7325                 
7326         stats->tx_packets = old_stats->tx_packets +
7327                 get_stat64(&hw_stats->tx_ucast_packets) +
7328                 get_stat64(&hw_stats->tx_mcast_packets) +
7329                 get_stat64(&hw_stats->tx_bcast_packets);
7330
7331         stats->rx_bytes = old_stats->rx_bytes +
7332                 get_stat64(&hw_stats->rx_octets);
7333         stats->tx_bytes = old_stats->tx_bytes +
7334                 get_stat64(&hw_stats->tx_octets);
7335
7336         stats->rx_errors = old_stats->rx_errors +
7337                 get_stat64(&hw_stats->rx_errors);
7338         stats->tx_errors = old_stats->tx_errors +
7339                 get_stat64(&hw_stats->tx_errors) +
7340                 get_stat64(&hw_stats->tx_mac_errors) +
7341                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7342                 get_stat64(&hw_stats->tx_discards);
7343
7344         stats->multicast = old_stats->multicast +
7345                 get_stat64(&hw_stats->rx_mcast_packets);
7346         stats->collisions = old_stats->collisions +
7347                 get_stat64(&hw_stats->tx_collisions);
7348
7349         stats->rx_length_errors = old_stats->rx_length_errors +
7350                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7351                 get_stat64(&hw_stats->rx_undersize_packets);
7352
7353         stats->rx_over_errors = old_stats->rx_over_errors +
7354                 get_stat64(&hw_stats->rxbds_empty);
7355         stats->rx_frame_errors = old_stats->rx_frame_errors +
7356                 get_stat64(&hw_stats->rx_align_errors);
7357         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7358                 get_stat64(&hw_stats->tx_discards);
7359         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7360                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7361
7362         stats->rx_crc_errors = old_stats->rx_crc_errors +
7363                 calc_crc_errors(tp);
7364
7365         stats->rx_missed_errors = old_stats->rx_missed_errors +
7366                 get_stat64(&hw_stats->rx_discards);
7367
7368         return stats;
7369 }
7370
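/* Bit-reflected CRC-32 (polynomial 0xedb88320), i.e. the CRC used for the
 * Ethernet FCS.  The result is used below to hash multicast addresses into
 * the 128-bit MAC hash filter.
 */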
7371 static inline u32 calc_crc(unsigned char *buf, int len)
7372 {
7373         u32 reg;
7374         u32 tmp;
7375         int j, k;
7376
7377         reg = 0xffffffff;
7378
7379         for (j = 0; j < len; j++) {
7380                 reg ^= buf[j];
7381
7382                 for (k = 0; k < 8; k++) {
7383                         tmp = reg & 0x01;
7384
7385                         reg >>= 1;
7386
7387                         if (tmp) {
7388                                 reg ^= 0xedb88320;
7389                         }
7390                 }
7391         }
7392
7393         return ~reg;
7394 }
7395
7396 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7397 {
7398         /* accept or reject all multicast frames */
7399         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7400         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7401         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7402         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7403 }
7404
7405 static void __tg3_set_rx_mode(struct net_device *dev)
7406 {
7407         struct tg3 *tp = netdev_priv(dev);
7408         u32 rx_mode;
7409
7410         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7411                                   RX_MODE_KEEP_VLAN_TAG);
7412
7413         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7414          * flag clear.
7415          */
7416 #if TG3_VLAN_TAG_USED
7417         if (!tp->vlgrp &&
7418             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7419                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7420 #else
7421         /* By definition, VLAN is always disabled in this
7422          * case.
7423          */
7424         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7425                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7426 #endif
7427
7428         if (dev->flags & IFF_PROMISC) {
7429                 /* Promiscuous mode. */
7430                 rx_mode |= RX_MODE_PROMISC;
7431         } else if (dev->flags & IFF_ALLMULTI) {
7432                 /* Accept all multicast. */
7433                 tg3_set_multi (tp, 1);
7434         } else if (dev->mc_count < 1) {
7435                 /* Reject all multicast. */
7436                 tg3_set_multi (tp, 0);
7437         } else {
7438                 /* Accept one or more multicast(s). */
7439                 struct dev_mc_list *mclist;
7440                 unsigned int i;
7441                 u32 mc_filter[4] = { 0, };
7442                 u32 regidx;
7443                 u32 bit;
7444                 u32 crc;
7445
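                     /* The low 7 bits of the inverted CRC pick one of the
                      * 128 hash-filter bits: bits 6:5 select one of the four
                      * MAC_HASH registers and bits 4:0 the bit within it.
                      */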
7446                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7447                      i++, mclist = mclist->next) {
7448
7449                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7450                         bit = ~crc & 0x7f;
7451                         regidx = (bit & 0x60) >> 5;
7452                         bit &= 0x1f;
7453                         mc_filter[regidx] |= (1 << bit);
7454                 }
7455
7456                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7457                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7458                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7459                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7460         }
7461
7462         if (rx_mode != tp->rx_mode) {
7463                 tp->rx_mode = rx_mode;
7464                 tw32_f(MAC_RX_MODE, rx_mode);
7465                 udelay(10);
7466         }
7467 }
7468
7469 static void tg3_set_rx_mode(struct net_device *dev)
7470 {
7471         struct tg3 *tp = netdev_priv(dev);
7472
7473         if (!netif_running(dev))
7474                 return;
7475
7476         tg3_full_lock(tp, 0);
7477         __tg3_set_rx_mode(dev);
7478         tg3_full_unlock(tp);
7479 }
7480
7481 #define TG3_REGDUMP_LEN         (32 * 1024)
7482
7483 static int tg3_get_regs_len(struct net_device *dev)
7484 {
7485         return TG3_REGDUMP_LEN;
7486 }
7487
7488 static void tg3_get_regs(struct net_device *dev,
7489                 struct ethtool_regs *regs, void *_p)
7490 {
7491         u32 *p = _p;
7492         struct tg3 *tp = netdev_priv(dev);
7493         u8 *orig_p = _p;
7494         int i;
7495
7496         regs->version = 0;
7497
7498         memset(p, 0, TG3_REGDUMP_LEN);
7499
7500         if (tp->link_config.phy_is_low_power)
7501                 return;
7502
7503         tg3_full_lock(tp, 0);
7504
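/* The helpers below copy blocks of registers into the snapshot buffer at an
 * offset equal to the register address, so the dump preserves the chip's own
 * register layout; ranges that are not read stay zeroed by the memset above.
 */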
7505 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7506 #define GET_REG32_LOOP(base,len)                \
7507 do {    p = (u32 *)(orig_p + (base));           \
7508         for (i = 0; i < len; i += 4)            \
7509                 __GET_REG32((base) + i);        \
7510 } while (0)
7511 #define GET_REG32_1(reg)                        \
7512 do {    p = (u32 *)(orig_p + (reg));            \
7513         __GET_REG32((reg));                     \
7514 } while (0)
7515
7516         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7517         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7518         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7519         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7520         GET_REG32_1(SNDDATAC_MODE);
7521         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7522         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7523         GET_REG32_1(SNDBDC_MODE);
7524         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7525         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7526         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7527         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7528         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7529         GET_REG32_1(RCVDCC_MODE);
7530         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7531         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7532         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7533         GET_REG32_1(MBFREE_MODE);
7534         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7535         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7536         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7537         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7538         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7539         GET_REG32_1(RX_CPU_MODE);
7540         GET_REG32_1(RX_CPU_STATE);
7541         GET_REG32_1(RX_CPU_PGMCTR);
7542         GET_REG32_1(RX_CPU_HWBKPT);
7543         GET_REG32_1(TX_CPU_MODE);
7544         GET_REG32_1(TX_CPU_STATE);
7545         GET_REG32_1(TX_CPU_PGMCTR);
7546         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7547         GET_REG32_LOOP(FTQ_RESET, 0x120);
7548         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7549         GET_REG32_1(DMAC_MODE);
7550         GET_REG32_LOOP(GRC_MODE, 0x4c);
7551         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7552                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7553
7554 #undef __GET_REG32
7555 #undef GET_REG32_LOOP
7556 #undef GET_REG32_1
7557
7558         tg3_full_unlock(tp);
7559 }
7560
7561 static int tg3_get_eeprom_len(struct net_device *dev)
7562 {
7563         struct tg3 *tp = netdev_priv(dev);
7564
7565         return tp->nvram_size;
7566 }
7567
7568 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7569 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7570
7571 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7572 {
7573         struct tg3 *tp = netdev_priv(dev);
7574         int ret;
7575         u8  *pd;
7576         u32 i, offset, len, val, b_offset, b_count;
7577
7578         if (tp->link_config.phy_is_low_power)
7579                 return -EAGAIN;
7580
7581         offset = eeprom->offset;
7582         len = eeprom->len;
7583         eeprom->len = 0;
7584
7585         eeprom->magic = TG3_EEPROM_MAGIC;
7586
7587         if (offset & 3) {
7588                 /* adjustments to start on required 4 byte boundary */
7589                 b_offset = offset & 3;
7590                 b_count = 4 - b_offset;
7591                 if (b_count > len) {
7592                         /* i.e. offset=1 len=2 */
7593                         b_count = len;
7594                 }
7595                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7596                 if (ret)
7597                         return ret;
7598                 val = cpu_to_le32(val);
7599                 memcpy(data, ((char*)&val) + b_offset, b_count);
7600                 len -= b_count;
7601                 offset += b_count;
7602                 eeprom->len += b_count;
7603         }
7604
7605         /* read bytes up to the last 4 byte boundary */
7606         pd = &data[eeprom->len];
7607         for (i = 0; i < (len - (len & 3)); i += 4) {
7608                 ret = tg3_nvram_read(tp, offset + i, &val);
7609                 if (ret) {
7610                         eeprom->len += i;
7611                         return ret;
7612                 }
7613                 val = cpu_to_le32(val);
7614                 memcpy(pd + i, &val, 4);
7615         }
7616         eeprom->len += i;
7617
7618         if (len & 3) {
7619                 /* read last bytes not ending on 4 byte boundary */
7620                 pd = &data[eeprom->len];
7621                 b_count = len & 3;
7622                 b_offset = offset + len - b_count;
7623                 ret = tg3_nvram_read(tp, b_offset, &val);
7624                 if (ret)
7625                         return ret;
7626                 val = cpu_to_le32(val);
7627                 memcpy(pd, ((char*)&val), b_count);
7628                 eeprom->len += b_count;
7629         }
7630         return 0;
7631 }
7632
7633 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7634
7635 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7636 {
7637         struct tg3 *tp = netdev_priv(dev);
7638         int ret;
7639         u32 offset, len, b_offset, odd_len, start, end;
7640         u8 *buf;
7641
7642         if (tp->link_config.phy_is_low_power)
7643                 return -EAGAIN;
7644
7645         if (eeprom->magic != TG3_EEPROM_MAGIC)
7646                 return -EINVAL;
7647
7648         offset = eeprom->offset;
7649         len = eeprom->len;
7650
7651         if ((b_offset = (offset & 3))) {
7652                 /* adjustments to start on required 4 byte boundary */
7653                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7654                 if (ret)
7655                         return ret;
7656                 start = cpu_to_le32(start);
7657                 len += b_offset;
7658                 offset &= ~3;
7659                 if (len < 4)
7660                         len = 4;
7661         }
7662
7663         odd_len = 0;
7664         if (len & 3) {
7665                 /* adjustments to end on required 4 byte boundary */
7666                 odd_len = 1;
7667                 len = (len + 3) & ~3;
7668                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7669                 if (ret)
7670                         return ret;
7671                 end = cpu_to_le32(end);
7672         }
7673
7674         buf = data;
7675         if (b_offset || odd_len) {
7676                 buf = kmalloc(len, GFP_KERNEL);
7677                 if (buf == NULL)
7678                         return -ENOMEM;
7679                 if (b_offset)
7680                         memcpy(buf, &start, 4);
7681                 if (odd_len)
7682                         memcpy(buf+len-4, &end, 4);
7683                 memcpy(buf + b_offset, data, eeprom->len);
7684         }
7685
7686         ret = tg3_nvram_write_block(tp, offset, len, buf);
7687
7688         if (buf != data)
7689                 kfree(buf);
7690
7691         return ret;
7692 }
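/* Worked example of the read-modify-write padding above (illustrative values):
 * for offset=6, len=3 the word at offset 4 is read into 'start' and the word
 * at offset 8 into 'end'; an 8-byte bounce buffer is built as [start][end],
 * the caller's 3 bytes are copied in at b_offset=2, and the whole word-aligned
 * span covering offsets 4..11 is handed to tg3_nvram_write_block().
 */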
7693
7694 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7695 {
7696         struct tg3 *tp = netdev_priv(dev);
7697   
7698         cmd->supported = (SUPPORTED_Autoneg);
7699
7700         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7701                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7702                                    SUPPORTED_1000baseT_Full);
7703
7704         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7705                 cmd->supported |= (SUPPORTED_100baseT_Half |
7706                                   SUPPORTED_100baseT_Full |
7707                                   SUPPORTED_10baseT_Half |
7708                                   SUPPORTED_10baseT_Full |
7709                                   SUPPORTED_MII);
7710                 cmd->port = PORT_TP;
7711         } else {
7712                 cmd->supported |= SUPPORTED_FIBRE;
7713                 cmd->port = PORT_FIBRE;
7714         }
7715   
7716         cmd->advertising = tp->link_config.advertising;
7717         if (netif_running(dev)) {
7718                 cmd->speed = tp->link_config.active_speed;
7719                 cmd->duplex = tp->link_config.active_duplex;
7720         }
7721         cmd->phy_address = PHY_ADDR;
7722         cmd->transceiver = 0;
7723         cmd->autoneg = tp->link_config.autoneg;
7724         cmd->maxtxpkt = 0;
7725         cmd->maxrxpkt = 0;
7726         return 0;
7727 }
7728   
7729 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7730 {
7731         struct tg3 *tp = netdev_priv(dev);
7732   
7733         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7734                 /* These are the only valid advertisement bits allowed.  */
7735                 if (cmd->autoneg == AUTONEG_ENABLE &&
7736                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7737                                           ADVERTISED_1000baseT_Full |
7738                                           ADVERTISED_Autoneg |
7739                                           ADVERTISED_FIBRE)))
7740                         return -EINVAL;
7741                 /* Fiber can only do SPEED_1000.  */
7742                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7743                          (cmd->speed != SPEED_1000))
7744                         return -EINVAL;
7745         /* Copper cannot force SPEED_1000.  */
7746         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7747                    (cmd->speed == SPEED_1000))
7748                 return -EINVAL;
7749         else if ((cmd->speed == SPEED_1000) &&
7750                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7751                 return -EINVAL;
7752
7753         tg3_full_lock(tp, 0);
7754
7755         tp->link_config.autoneg = cmd->autoneg;
7756         if (cmd->autoneg == AUTONEG_ENABLE) {
7757                 tp->link_config.advertising = cmd->advertising;
7758                 tp->link_config.speed = SPEED_INVALID;
7759                 tp->link_config.duplex = DUPLEX_INVALID;
7760         } else {
7761                 tp->link_config.advertising = 0;
7762                 tp->link_config.speed = cmd->speed;
7763                 tp->link_config.duplex = cmd->duplex;
7764         }
7765   
7766         if (netif_running(dev))
7767                 tg3_setup_phy(tp, 1);
7768
7769         tg3_full_unlock(tp);
7770   
7771         return 0;
7772 }
7773   
7774 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7775 {
7776         struct tg3 *tp = netdev_priv(dev);
7777   
7778         strcpy(info->driver, DRV_MODULE_NAME);
7779         strcpy(info->version, DRV_MODULE_VERSION);
7780         strcpy(info->fw_version, tp->fw_ver);
7781         strcpy(info->bus_info, pci_name(tp->pdev));
7782 }
7783   
7784 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7785 {
7786         struct tg3 *tp = netdev_priv(dev);
7787   
7788         wol->supported = WAKE_MAGIC;
7789         wol->wolopts = 0;
7790         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7791                 wol->wolopts = WAKE_MAGIC;
7792         memset(&wol->sopass, 0, sizeof(wol->sopass));
7793 }
7794   
7795 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7796 {
7797         struct tg3 *tp = netdev_priv(dev);
7798   
7799         if (wol->wolopts & ~WAKE_MAGIC)
7800                 return -EINVAL;
7801         if ((wol->wolopts & WAKE_MAGIC) &&
7802             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7803             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7804                 return -EINVAL;
7805   
7806         spin_lock_bh(&tp->lock);
7807         if (wol->wolopts & WAKE_MAGIC)
7808                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7809         else
7810                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7811         spin_unlock_bh(&tp->lock);
7812   
7813         return 0;
7814 }
7815   
7816 static u32 tg3_get_msglevel(struct net_device *dev)
7817 {
7818         struct tg3 *tp = netdev_priv(dev);
7819         return tp->msg_enable;
7820 }
7821   
7822 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7823 {
7824         struct tg3 *tp = netdev_priv(dev);
7825         tp->msg_enable = value;
7826 }
7827   
7828 #if TG3_TSO_SUPPORT != 0
7829 static int tg3_set_tso(struct net_device *dev, u32 value)
7830 {
7831         struct tg3 *tp = netdev_priv(dev);
7832
7833         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7834                 if (value)
7835                         return -EINVAL;
7836                 return 0;
7837         }
7838         return ethtool_op_set_tso(dev, value);
7839 }
7840 #endif
7841   
7842 static int tg3_nway_reset(struct net_device *dev)
7843 {
7844         struct tg3 *tp = netdev_priv(dev);
7845         u32 bmcr;
7846         int r;
7847   
7848         if (!netif_running(dev))
7849                 return -EAGAIN;
7850
7851         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7852                 return -EINVAL;
7853
7854         spin_lock_bh(&tp->lock);
7855         r = -EINVAL;
7856         tg3_readphy(tp, MII_BMCR, &bmcr);
7857         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7858             ((bmcr & BMCR_ANENABLE) ||
7859              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7860                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7861                                            BMCR_ANENABLE);
7862                 r = 0;
7863         }
7864         spin_unlock_bh(&tp->lock);
7865   
7866         return r;
7867 }
7868   
7869 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7870 {
7871         struct tg3 *tp = netdev_priv(dev);
7872   
7873         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7874         ering->rx_mini_max_pending = 0;
7875         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7876                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7877         else
7878                 ering->rx_jumbo_max_pending = 0;
7879
7880         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7881
7882         ering->rx_pending = tp->rx_pending;
7883         ering->rx_mini_pending = 0;
7884         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7885                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7886         else
7887                 ering->rx_jumbo_pending = 0;
7888
7889         ering->tx_pending = tp->tx_pending;
7890 }
7891   
7892 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7893 {
7894         struct tg3 *tp = netdev_priv(dev);
7895         int irq_sync = 0;
7896   
7897         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7898             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7899             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7900                 return -EINVAL;
7901   
7902         if (netif_running(dev)) {
7903                 tg3_netif_stop(tp);
7904                 irq_sync = 1;
7905         }
7906
7907         tg3_full_lock(tp, irq_sync);
7908   
7909         tp->rx_pending = ering->rx_pending;
7910
7911         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7912             tp->rx_pending > 63)
7913                 tp->rx_pending = 63;
7914         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7915         tp->tx_pending = ering->tx_pending;
7916
7917         if (netif_running(dev)) {
7918                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7919                 tg3_init_hw(tp, 1);
7920                 tg3_netif_start(tp);
7921         }
7922
7923         tg3_full_unlock(tp);
7924   
7925         return 0;
7926 }
7927   
7928 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7929 {
7930         struct tg3 *tp = netdev_priv(dev);
7931   
7932         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7933         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7934         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7935 }
7936   
7937 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7938 {
7939         struct tg3 *tp = netdev_priv(dev);
7940         int irq_sync = 0;
7941   
7942         if (netif_running(dev)) {
7943                 tg3_netif_stop(tp);
7944                 irq_sync = 1;
7945         }
7946
7947         tg3_full_lock(tp, irq_sync);
7948
7949         if (epause->autoneg)
7950                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7951         else
7952                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7953         if (epause->rx_pause)
7954                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7955         else
7956                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7957         if (epause->tx_pause)
7958                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7959         else
7960                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7961
7962         if (netif_running(dev)) {
7963                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7964                 tg3_init_hw(tp, 1);
7965                 tg3_netif_start(tp);
7966         }
7967
7968         tg3_full_unlock(tp);
7969   
7970         return 0;
7971 }
7972   
7973 static u32 tg3_get_rx_csum(struct net_device *dev)
7974 {
7975         struct tg3 *tp = netdev_priv(dev);
7976         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7977 }
7978   
7979 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7980 {
7981         struct tg3 *tp = netdev_priv(dev);
7982   
7983         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7984                 if (data != 0)
7985                         return -EINVAL;
7986                 return 0;
7987         }
7988   
7989         spin_lock_bh(&tp->lock);
7990         if (data)
7991                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7992         else
7993                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7994         spin_unlock_bh(&tp->lock);
7995   
7996         return 0;
7997 }
7998   
7999 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8000 {
8001         struct tg3 *tp = netdev_priv(dev);
8002   
8003         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8004                 if (data != 0)
8005                         return -EINVAL;
8006                 return 0;
8007         }
8008   
8009         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8011                 ethtool_op_set_tx_hw_csum(dev, data);
8012         else
8013                 ethtool_op_set_tx_csum(dev, data);
8014
8015         return 0;
8016 }
8017
8018 static int tg3_get_stats_count (struct net_device *dev)
8019 {
8020         return TG3_NUM_STATS;
8021 }
8022
8023 static int tg3_get_test_count (struct net_device *dev)
8024 {
8025         return TG3_NUM_TEST;
8026 }
8027
8028 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8029 {
8030         switch (stringset) {
8031         case ETH_SS_STATS:
8032                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8033                 break;
8034         case ETH_SS_TEST:
8035                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8036                 break;
8037         default:
8038                 WARN_ON(1);     /* we need a WARN() */
8039                 break;
8040         }
8041 }
8042
8043 static int tg3_phys_id(struct net_device *dev, u32 data)
8044 {
8045         struct tg3 *tp = netdev_priv(dev);
8046         int i;
8047
8048         if (!netif_running(tp->dev))
8049                 return -EAGAIN;
8050
8051         if (data == 0)
8052                 data = 2;
8053
8054         for (i = 0; i < (data * 2); i++) {
8055                 if ((i % 2) == 0)
8056                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8057                                            LED_CTRL_1000MBPS_ON |
8058                                            LED_CTRL_100MBPS_ON |
8059                                            LED_CTRL_10MBPS_ON |
8060                                            LED_CTRL_TRAFFIC_OVERRIDE |
8061                                            LED_CTRL_TRAFFIC_BLINK |
8062                                            LED_CTRL_TRAFFIC_LED);
8063         
8064                 else
8065                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8066                                            LED_CTRL_TRAFFIC_OVERRIDE);
8067
8068                 if (msleep_interruptible(500))
8069                         break;
8070         }
8071         tw32(MAC_LED_CTRL, tp->led_ctrl);
8072         return 0;
8073 }
8074
8075 static void tg3_get_ethtool_stats (struct net_device *dev,
8076                                    struct ethtool_stats *estats, u64 *tmp_stats)
8077 {
8078         struct tg3 *tp = netdev_priv(dev);
8079         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8080 }
8081
8082 #define NVRAM_TEST_SIZE 0x100
8083 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8084
8085 static int tg3_test_nvram(struct tg3 *tp)
8086 {
8087         u32 *buf, csum, magic;
8088         int i, j, err = 0, size;
8089
8090         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8091                 return -EIO;
8092
8093         if (magic == TG3_EEPROM_MAGIC)
8094                 size = NVRAM_TEST_SIZE;
8095         else if ((magic & 0xff000000) == 0xa5000000) {
8096                 if ((magic & 0xe00000) == 0x200000)
8097                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8098                 else
8099                         return 0;
8100         } else
8101                 return -EIO;
8102
8103         buf = kmalloc(size, GFP_KERNEL);
8104         if (buf == NULL)
8105                 return -ENOMEM;
8106
8107         err = -EIO;
8108         for (i = 0, j = 0; i < size; i += 4, j++) {
8109                 u32 val;
8110
8111                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8112                         break;
8113                 buf[j] = cpu_to_le32(val);
8114         }
8115         if (i < size)
8116                 goto out;
8117
8118         /* Selfboot format */
8119         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8120                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8121
8122                 for (i = 0; i < size; i++)
8123                         csum8 += buf8[i];
8124
8125                 if (csum8 == 0) {
8126                         err = 0;
8127                         goto out;
8128                 }
8129
8130                 err = -EIO;
8131                 goto out;
8132         }
8133
8134         /* Bootstrap checksum at offset 0x10 */
8135         csum = calc_crc((unsigned char *) buf, 0x10);
8136         if (csum != cpu_to_le32(buf[0x10/4]))
8137                 goto out;
8138
8139         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8140         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8141         if (csum != cpu_to_le32(buf[0xfc/4]))
8142                 goto out;
8143
8144         err = 0;
8145
8146 out:
8147         kfree(buf);
8148         return err;
8149 }
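/* Layout assumed by the legacy-image checks above: word 0 holds
 * TG3_EEPROM_MAGIC, the CRC of bytes 0x00-0x0f is stored at offset 0x10, and
 * the 0x88-byte manufacturing block at 0x74 carries its CRC at 0xfc.
 * Selfboot images (magic 0xa5xxxxxx, format 1) are instead validated by a
 * simple byte-wise checksum that must sum to zero.
 */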
8150
8151 #define TG3_SERDES_TIMEOUT_SEC  2
8152 #define TG3_COPPER_TIMEOUT_SEC  6
8153
8154 static int tg3_test_link(struct tg3 *tp)
8155 {
8156         int i, max;
8157
8158         if (!netif_running(tp->dev))
8159                 return -ENODEV;
8160
8161         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8162                 max = TG3_SERDES_TIMEOUT_SEC;
8163         else
8164                 max = TG3_COPPER_TIMEOUT_SEC;
8165
8166         for (i = 0; i < max; i++) {
8167                 if (netif_carrier_ok(tp->dev))
8168                         return 0;
8169
8170                 if (msleep_interruptible(1000))
8171                         break;
8172         }
8173
8174         return -EIO;
8175 }
8176
8177 /* Only test the commonly used registers */
8178 static int tg3_test_registers(struct tg3 *tp)
8179 {
8180         int i, is_5705;
8181         u32 offset, read_mask, write_mask, val, save_val, read_val;
8182         static struct {
8183                 u16 offset;
8184                 u16 flags;
8185 #define TG3_FL_5705     0x1
8186 #define TG3_FL_NOT_5705 0x2
8187 #define TG3_FL_NOT_5788 0x4
8188                 u32 read_mask;
8189                 u32 write_mask;
8190         } reg_tbl[] = {
8191                 /* MAC Control Registers */
8192                 { MAC_MODE, TG3_FL_NOT_5705,
8193                         0x00000000, 0x00ef6f8c },
8194                 { MAC_MODE, TG3_FL_5705,
8195                         0x00000000, 0x01ef6b8c },
8196                 { MAC_STATUS, TG3_FL_NOT_5705,
8197                         0x03800107, 0x00000000 },
8198                 { MAC_STATUS, TG3_FL_5705,
8199                         0x03800100, 0x00000000 },
8200                 { MAC_ADDR_0_HIGH, 0x0000,
8201                         0x00000000, 0x0000ffff },
8202                 { MAC_ADDR_0_LOW, 0x0000,
8203                         0x00000000, 0xffffffff },
8204                 { MAC_RX_MTU_SIZE, 0x0000,
8205                         0x00000000, 0x0000ffff },
8206                 { MAC_TX_MODE, 0x0000,
8207                         0x00000000, 0x00000070 },
8208                 { MAC_TX_LENGTHS, 0x0000,
8209                         0x00000000, 0x00003fff },
8210                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8211                         0x00000000, 0x000007fc },
8212                 { MAC_RX_MODE, TG3_FL_5705,
8213                         0x00000000, 0x000007dc },
8214                 { MAC_HASH_REG_0, 0x0000,
8215                         0x00000000, 0xffffffff },
8216                 { MAC_HASH_REG_1, 0x0000,
8217                         0x00000000, 0xffffffff },
8218                 { MAC_HASH_REG_2, 0x0000,
8219                         0x00000000, 0xffffffff },
8220                 { MAC_HASH_REG_3, 0x0000,
8221                         0x00000000, 0xffffffff },
8222
8223                 /* Receive Data and Receive BD Initiator Control Registers. */
8224                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8225                         0x00000000, 0xffffffff },
8226                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8227                         0x00000000, 0xffffffff },
8228                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8229                         0x00000000, 0x00000003 },
8230                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8231                         0x00000000, 0xffffffff },
8232                 { RCVDBDI_STD_BD+0, 0x0000,
8233                         0x00000000, 0xffffffff },
8234                 { RCVDBDI_STD_BD+4, 0x0000,
8235                         0x00000000, 0xffffffff },
8236                 { RCVDBDI_STD_BD+8, 0x0000,
8237                         0x00000000, 0xffff0002 },
8238                 { RCVDBDI_STD_BD+0xc, 0x0000,
8239                         0x00000000, 0xffffffff },
8240         
8241                 /* Receive BD Initiator Control Registers. */
8242                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8243                         0x00000000, 0xffffffff },
8244                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8245                         0x00000000, 0x000003ff },
8246                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8247                         0x00000000, 0xffffffff },
8248         
8249                 /* Host Coalescing Control Registers. */
8250                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8251                         0x00000000, 0x00000004 },
8252                 { HOSTCC_MODE, TG3_FL_5705,
8253                         0x00000000, 0x000000f6 },
8254                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8255                         0x00000000, 0xffffffff },
8256                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8257                         0x00000000, 0x000003ff },
8258                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8259                         0x00000000, 0xffffffff },
8260                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8261                         0x00000000, 0x000003ff },
8262                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8263                         0x00000000, 0xffffffff },
8264                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8265                         0x00000000, 0x000000ff },
8266                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8267                         0x00000000, 0xffffffff },
8268                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8269                         0x00000000, 0x000000ff },
8270                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8271                         0x00000000, 0xffffffff },
8272                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8273                         0x00000000, 0xffffffff },
8274                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8275                         0x00000000, 0xffffffff },
8276                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8277                         0x00000000, 0x000000ff },
8278                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8279                         0x00000000, 0xffffffff },
8280                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8281                         0x00000000, 0x000000ff },
8282                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8283                         0x00000000, 0xffffffff },
8284                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8285                         0x00000000, 0xffffffff },
8286                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8287                         0x00000000, 0xffffffff },
8288                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8289                         0x00000000, 0xffffffff },
8290                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8291                         0x00000000, 0xffffffff },
8292                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8293                         0xffffffff, 0x00000000 },
8294                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8295                         0xffffffff, 0x00000000 },
8296
8297                 /* Buffer Manager Control Registers. */
8298                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8299                         0x00000000, 0x007fff80 },
8300                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8301                         0x00000000, 0x007fffff },
8302                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8303                         0x00000000, 0x0000003f },
8304                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8305                         0x00000000, 0x000001ff },
8306                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8307                         0x00000000, 0x000001ff },
8308                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8309                         0xffffffff, 0x00000000 },
8310                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8311                         0xffffffff, 0x00000000 },
8312         
8313                 /* Mailbox Registers */
8314                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8315                         0x00000000, 0x000001ff },
8316                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8317                         0x00000000, 0x000001ff },
8318                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8319                         0x00000000, 0x000007ff },
8320                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8321                         0x00000000, 0x000001ff },
8322
8323                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8324         };
8325
8326         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8327                 is_5705 = 1;
8328         else
8329                 is_5705 = 0;
8330
8331         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8332                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8333                         continue;
8334
8335                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8336                         continue;
8337
8338                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8339                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8340                         continue;
8341
8342                 offset = (u32) reg_tbl[i].offset;
8343                 read_mask = reg_tbl[i].read_mask;
8344                 write_mask = reg_tbl[i].write_mask;
8345
8346                 /* Save the original register content */
8347                 save_val = tr32(offset);
8348
8349                 /* Determine the read-only value. */
8350                 read_val = save_val & read_mask;
8351
8352                 /* Write zero to the register, then make sure the read-only bits
8353                  * are not changed and the read/write bits are all zeros.
8354                  */
8355                 tw32(offset, 0);
8356
8357                 val = tr32(offset);
8358
8359                 /* Test the read-only and read/write bits. */
8360                 if (((val & read_mask) != read_val) || (val & write_mask))
8361                         goto out;
8362
8363                 /* Write ones to all the bits defined by RdMask and WrMask, then
8364                  * make sure the read-only bits are not changed and the
8365                  * read/write bits are all ones.
8366                  */
8367                 tw32(offset, read_mask | write_mask);
8368
8369                 val = tr32(offset);
8370
8371                 /* Test the read-only bits. */
8372                 if ((val & read_mask) != read_val)
8373                         goto out;
8374
8375                 /* Test the read/write bits. */
8376                 if ((val & write_mask) != write_mask)
8377                         goto out;
8378
8379                 tw32(offset, save_val);
8380         }
8381
8382         return 0;
8383
8384 out:
8385         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8386         tw32(offset, save_val);
8387         return -EIO;
8388 }
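/* Example of how one reg_tbl entry is exercised (illustrative): MAC_ADDR_0_HIGH
 * has read_mask 0x00000000 and write_mask 0x0000ffff, so after writing 0 the
 * low 16 bits must read back as 0, after writing 0x0000ffff they must read
 * back as 0x0000ffff, and the read-only bits must be unchanged in both cases;
 * the original register value is restored afterwards.
 */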
8389
8390 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8391 {
8392         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8393         int i;
8394         u32 j;
8395
8396         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8397                 for (j = 0; j < len; j += 4) {
8398                         u32 val;
8399
8400                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8401                         tg3_read_mem(tp, offset + j, &val);
8402                         if (val != test_pattern[i])
8403                                 return -EIO;
8404                 }
8405         }
8406         return 0;
8407 }
8408
8409 static int tg3_test_memory(struct tg3 *tp)
8410 {
8411         static struct mem_entry {
8412                 u32 offset;
8413                 u32 len;
8414         } mem_tbl_570x[] = {
8415                 { 0x00000000, 0x00b50},
8416                 { 0x00002000, 0x1c000},
8417                 { 0xffffffff, 0x00000}
8418         }, mem_tbl_5705[] = {
8419                 { 0x00000100, 0x0000c},
8420                 { 0x00000200, 0x00008},
8421                 { 0x00004000, 0x00800},
8422                 { 0x00006000, 0x01000},
8423                 { 0x00008000, 0x02000},
8424                 { 0x00010000, 0x0e000},
8425                 { 0xffffffff, 0x00000}
8426         }, mem_tbl_5755[] = {
8427                 { 0x00000200, 0x00008},
8428                 { 0x00004000, 0x00800},
8429                 { 0x00006000, 0x00800},
8430                 { 0x00008000, 0x02000},
8431                 { 0x00010000, 0x0c000},
8432                 { 0xffffffff, 0x00000}
8433         };
8434         struct mem_entry *mem_tbl;
8435         int err = 0;
8436         int i;
8437
8438         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8439                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8440                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8441                         mem_tbl = mem_tbl_5755;
8442                 else
8443                         mem_tbl = mem_tbl_5705;
8444         } else
8445                 mem_tbl = mem_tbl_570x;
8446
8447         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8448                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8449                     mem_tbl[i].len)) != 0)
8450                         break;
8451         }
8452         
8453         return err;
8454 }
8455
8456 #define TG3_MAC_LOOPBACK        0
8457 #define TG3_PHY_LOOPBACK        1
8458
8459 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8460 {
8461         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8462         u32 desc_idx;
8463         struct sk_buff *skb, *rx_skb;
8464         u8 *tx_data;
8465         dma_addr_t map;
8466         int num_pkts, tx_len, rx_len, i, err;
8467         struct tg3_rx_buffer_desc *desc;
8468
8469         if (loopback_mode == TG3_MAC_LOOPBACK) {
8470                 /* HW errata - mac loopback fails in some cases on 5780.
8471                  * Normal traffic and PHY loopback are not affected by
8472                  * errata.
8473                  */
8474                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8475                         return 0;
8476
8477                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8478                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8479                            MAC_MODE_PORT_MODE_GMII;
8480                 tw32(MAC_MODE, mac_mode);
8481         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8482                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8483                                            BMCR_SPEED1000);
8484                 udelay(40);
8485                 /* reset to prevent losing 1st rx packet intermittently */
8486                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8487                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8488                         udelay(10);
8489                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8490                 }
8491                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8492                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8493                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8494                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8495                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8496                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8497                 }
8498                 tw32(MAC_MODE, mac_mode);
8499         }
8500         else
8501                 return -EINVAL;
8502
8503         err = -EIO;
8504
8505         tx_len = 1514;
8506         skb = dev_alloc_skb(tx_len);
8507         if (!skb)
8508                 return -ENOMEM;
8509
8510         tx_data = skb_put(skb, tx_len);
8511         memcpy(tx_data, tp->dev->dev_addr, 6);
8512         memset(tx_data + 6, 0x0, 8);
8513
8514         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8515
8516         for (i = 14; i < tx_len; i++)
8517                 tx_data[i] = (u8) (i & 0xff);
8518
8519         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8520
8521         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8522              HOSTCC_MODE_NOW);
8523
8524         udelay(10);
8525
8526         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8527
8528         num_pkts = 0;
8529
8530         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8531
8532         tp->tx_prod++;
8533         num_pkts++;
8534
8535         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8536                      tp->tx_prod);
8537         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8538
8539         udelay(10);
8540
8541         for (i = 0; i < 10; i++) {
8542                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8543                        HOSTCC_MODE_NOW);
8544
8545                 udelay(10);
8546
8547                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8548                 rx_idx = tp->hw_status->idx[0].rx_producer;
8549                 if ((tx_idx == tp->tx_prod) &&
8550                     (rx_idx == (rx_start_idx + num_pkts)))
8551                         break;
8552         }
8553
8554         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8555         dev_kfree_skb(skb);
8556
8557         if (tx_idx != tp->tx_prod)
8558                 goto out;
8559
8560         if (rx_idx != rx_start_idx + num_pkts)
8561                 goto out;
8562
8563         desc = &tp->rx_rcb[rx_start_idx];
8564         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8565         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8566         if (opaque_key != RXD_OPAQUE_RING_STD)
8567                 goto out;
8568
8569         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8570             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8571                 goto out;
8572
8573         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8574         if (rx_len != tx_len)
8575                 goto out;
8576
8577         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8578
8579         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8580         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8581
8582         for (i = 14; i < tx_len; i++) {
8583                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8584                         goto out;
8585         }
8586         err = 0;
8587         
8588         /* tg3_free_rings will unmap and free the rx_skb */
8589 out:
8590         return err;
8591 }
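/* Summary of the loopback sequence above: the MAC (internal loopback) or the
 * PHY (BMCR_LOOPBACK) is put into loopback, a 1514-byte frame addressed to the
 * device's own MAC with its payload filled with an (i & 0xff) pattern is
 * posted as a single TX descriptor, the send mailbox is kicked, and the status
 * block is polled until the TX consumer and RX producer advance; the received
 * buffer is then compared byte-for-byte against the transmitted payload.
 */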
8592
8593 #define TG3_MAC_LOOPBACK_FAILED         1
8594 #define TG3_PHY_LOOPBACK_FAILED         2
8595 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8596                                          TG3_PHY_LOOPBACK_FAILED)
8597
8598 static int tg3_test_loopback(struct tg3 *tp)
8599 {
8600         int err = 0;
8601
8602         if (!netif_running(tp->dev))
8603                 return TG3_LOOPBACK_FAILED;
8604
8605         tg3_reset_hw(tp, 1);
8606
8607         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8608                 err |= TG3_MAC_LOOPBACK_FAILED;
8609         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8610                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8611                         err |= TG3_PHY_LOOPBACK_FAILED;
8612         }
8613
8614         return err;
8615 }
8616
8617 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8618                           u64 *data)
8619 {
8620         struct tg3 *tp = netdev_priv(dev);
8621
8622         if (tp->link_config.phy_is_low_power)
8623                 tg3_set_power_state(tp, PCI_D0);
8624
8625         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8626
8627         if (tg3_test_nvram(tp) != 0) {
8628                 etest->flags |= ETH_TEST_FL_FAILED;
8629                 data[0] = 1;
8630         }
8631         if (tg3_test_link(tp) != 0) {
8632                 etest->flags |= ETH_TEST_FL_FAILED;
8633                 data[1] = 1;
8634         }
8635         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8636                 int err, irq_sync = 0;
8637
8638                 if (netif_running(dev)) {
8639                         tg3_netif_stop(tp);
8640                         irq_sync = 1;
8641                 }
8642
8643                 tg3_full_lock(tp, irq_sync);
8644
8645                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8646                 err = tg3_nvram_lock(tp);
8647                 tg3_halt_cpu(tp, RX_CPU_BASE);
8648                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8649                         tg3_halt_cpu(tp, TX_CPU_BASE);
8650                 if (!err)
8651                         tg3_nvram_unlock(tp);
8652
8653                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8654                         tg3_phy_reset(tp);
8655
8656                 if (tg3_test_registers(tp) != 0) {
8657                         etest->flags |= ETH_TEST_FL_FAILED;
8658                         data[2] = 1;
8659                 }
8660                 if (tg3_test_memory(tp) != 0) {
8661                         etest->flags |= ETH_TEST_FL_FAILED;
8662                         data[3] = 1;
8663                 }
8664                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8665                         etest->flags |= ETH_TEST_FL_FAILED;
8666
8667                 tg3_full_unlock(tp);
8668
8669                 if (tg3_test_interrupt(tp) != 0) {
8670                         etest->flags |= ETH_TEST_FL_FAILED;
8671                         data[5] = 1;
8672                 }
8673
8674                 tg3_full_lock(tp, 0);
8675
8676                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8677                 if (netif_running(dev)) {
8678                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8679                         tg3_init_hw(tp, 1);
8680                         tg3_netif_start(tp);
8681                 }
8682
8683                 tg3_full_unlock(tp);
8684         }
8685         if (tp->link_config.phy_is_low_power)
8686                 tg3_set_power_state(tp, PCI_D3hot);
8687
8688 }
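/* Offline self-test sequence above: the NIC is halted (with the NVRAM lock
 * held while the RX/TX CPUs are stopped), the register, memory and loopback
 * tests run under tg3_full_lock, the interrupt test runs with the lock
 * dropped, and the hardware is then reset and, if the interface was running,
 * re-initialised and restarted.
 */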
8689
8690 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8691 {
8692         struct mii_ioctl_data *data = if_mii(ifr);
8693         struct tg3 *tp = netdev_priv(dev);
8694         int err;
8695
8696         switch(cmd) {
8697         case SIOCGMIIPHY:
8698                 data->phy_id = PHY_ADDR;
8699
8700                 /* fallthru */
8701         case SIOCGMIIREG: {
8702                 u32 mii_regval;
8703
8704                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8705                         break;                  /* We have no PHY */
8706
8707                 if (tp->link_config.phy_is_low_power)
8708                         return -EAGAIN;
8709
8710                 spin_lock_bh(&tp->lock);
8711                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8712                 spin_unlock_bh(&tp->lock);
8713
8714                 data->val_out = mii_regval;
8715
8716                 return err;
8717         }
8718
8719         case SIOCSMIIREG:
8720                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8721                         break;                  /* We have no PHY */
8722
8723                 if (!capable(CAP_NET_ADMIN))
8724                         return -EPERM;
8725
8726                 if (tp->link_config.phy_is_low_power)
8727                         return -EAGAIN;
8728
8729                 spin_lock_bh(&tp->lock);
8730                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8731                 spin_unlock_bh(&tp->lock);
8732
8733                 return err;
8734
8735         default:
8736                 /* do nothing */
8737                 break;
8738         }
8739         return -EOPNOTSUPP;
8740 }
8741
8742 #if TG3_VLAN_TAG_USED
8743 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8744 {
8745         struct tg3 *tp = netdev_priv(dev);
8746
8747         tg3_full_lock(tp, 0);
8748
8749         tp->vlgrp = grp;
8750
8751         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8752         __tg3_set_rx_mode(dev);
8753
8754         tg3_full_unlock(tp);
8755 }
8756
8757 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8758 {
8759         struct tg3 *tp = netdev_priv(dev);
8760
8761         tg3_full_lock(tp, 0);
8762         if (tp->vlgrp)
8763                 tp->vlgrp->vlan_devices[vid] = NULL;
8764         tg3_full_unlock(tp);
8765 }
8766 #endif
8767
8768 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8769 {
8770         struct tg3 *tp = netdev_priv(dev);
8771
8772         memcpy(ec, &tp->coal, sizeof(*ec));
8773         return 0;
8774 }
8775
8776 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8777 {
8778         struct tg3 *tp = netdev_priv(dev);
8779         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8780         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8781
8782         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8783                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8784                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8785                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8786                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8787         }
8788
8789         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8790             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8791             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8792             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8793             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8794             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8795             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8796             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8797             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8798             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8799                 return -EINVAL;
8800
8801         /* No rx interrupts will be generated if both are zero */
8802         if ((ec->rx_coalesce_usecs == 0) &&
8803             (ec->rx_max_coalesced_frames == 0))
8804                 return -EINVAL;
8805
8806         /* No tx interrupts will be generated if both are zero */
8807         if ((ec->tx_coalesce_usecs == 0) &&
8808             (ec->tx_max_coalesced_frames == 0))
8809                 return -EINVAL;
8810
8811         /* Only copy relevant parameters, ignore all others. */
8812         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8813         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8814         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8815         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8816         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8817         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8818         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8819         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8820         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8821
8822         if (netif_running(dev)) {
8823                 tg3_full_lock(tp, 0);
8824                 __tg3_set_coalesce(tp, &tp->coal);
8825                 tg3_full_unlock(tp);
8826         }
8827         return 0;
8828 }
8829
8830 static struct ethtool_ops tg3_ethtool_ops = {
8831         .get_settings           = tg3_get_settings,
8832         .set_settings           = tg3_set_settings,
8833         .get_drvinfo            = tg3_get_drvinfo,
8834         .get_regs_len           = tg3_get_regs_len,
8835         .get_regs               = tg3_get_regs,
8836         .get_wol                = tg3_get_wol,
8837         .set_wol                = tg3_set_wol,
8838         .get_msglevel           = tg3_get_msglevel,
8839         .set_msglevel           = tg3_set_msglevel,
8840         .nway_reset             = tg3_nway_reset,
8841         .get_link               = ethtool_op_get_link,
8842         .get_eeprom_len         = tg3_get_eeprom_len,
8843         .get_eeprom             = tg3_get_eeprom,
8844         .set_eeprom             = tg3_set_eeprom,
8845         .get_ringparam          = tg3_get_ringparam,
8846         .set_ringparam          = tg3_set_ringparam,
8847         .get_pauseparam         = tg3_get_pauseparam,
8848         .set_pauseparam         = tg3_set_pauseparam,
8849         .get_rx_csum            = tg3_get_rx_csum,
8850         .set_rx_csum            = tg3_set_rx_csum,
8851         .get_tx_csum            = ethtool_op_get_tx_csum,
8852         .set_tx_csum            = tg3_set_tx_csum,
8853         .get_sg                 = ethtool_op_get_sg,
8854         .set_sg                 = ethtool_op_set_sg,
8855 #if TG3_TSO_SUPPORT != 0
8856         .get_tso                = ethtool_op_get_tso,
8857         .set_tso                = tg3_set_tso,
8858 #endif
8859         .self_test_count        = tg3_get_test_count,
8860         .self_test              = tg3_self_test,
8861         .get_strings            = tg3_get_strings,
8862         .phys_id                = tg3_phys_id,
8863         .get_stats_count        = tg3_get_stats_count,
8864         .get_ethtool_stats      = tg3_get_ethtool_stats,
8865         .get_coalesce           = tg3_get_coalesce,
8866         .set_coalesce           = tg3_set_coalesce,
8867         .get_perm_addr          = ethtool_op_get_perm_addr,
8868 };
8869
8870 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8871 {
8872         u32 cursize, val, magic;
8873
8874         tp->nvram_size = EEPROM_CHIP_SIZE;
8875
8876         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8877                 return;
8878
8879         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8880                 return;
8881
8882         /*
8883          * Size the chip by reading offsets at increasing powers of two.
8884          * When we encounter our validation signature, we know the addressing
8885          * has wrapped around, and thus have our chip size.
8886          */
8887         cursize = 0x10;
8888
8889         while (cursize < tp->nvram_size) {
8890                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8891                         return;
8892
8893                 if (val == magic)
8894                         break;
8895
8896                 cursize <<= 1;
8897         }
8898
8899         tp->nvram_size = cursize;
8900 }
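/* The probe above relies on address wrap-around: reads at 0x10, 0x20, 0x40, ...
 * return ordinary data until the offset reaches the part size, at which point
 * the address wraps to 0 and the magic word is seen again.  For a hypothetical
 * 512-byte part (size chosen only for illustration) the read at 0x200 would
 * wrap and return the magic, so nvram_size becomes 0x200.
 */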
8901                 
8902 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8903 {
8904         u32 val;
8905
8906         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8907                 return;
8908
8909         /* Selfboot format */
8910         if (val != TG3_EEPROM_MAGIC) {
8911                 tg3_get_eeprom_size(tp);
8912                 return;
8913         }
8914
8915         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8916                 if (val != 0) {
8917                         tp->nvram_size = (val >> 16) * 1024;
8918                         return;
8919                 }
8920         }
8921         tp->nvram_size = 0x20000;
8922 }
8923
8924 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8925 {
8926         u32 nvcfg1;
8927
8928         nvcfg1 = tr32(NVRAM_CFG1);
8929         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8930                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8931         }
8932         else {
8933                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8934                 tw32(NVRAM_CFG1, nvcfg1);
8935         }
8936
8937         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8938             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8939                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8940                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8941                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8942                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8943                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8944                                 break;
8945                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8946                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8947                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8948                                 break;
8949                         case FLASH_VENDOR_ATMEL_EEPROM:
8950                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8951                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8952                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8953                                 break;
8954                         case FLASH_VENDOR_ST:
8955                                 tp->nvram_jedecnum = JEDEC_ST;
8956                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8957                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8958                                 break;
8959                         case FLASH_VENDOR_SAIFUN:
8960                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8961                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8962                                 break;
8963                         case FLASH_VENDOR_SST_SMALL:
8964                         case FLASH_VENDOR_SST_LARGE:
8965                                 tp->nvram_jedecnum = JEDEC_SST;
8966                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8967                                 break;
8968                 }
8969         }
8970         else {
8971                 tp->nvram_jedecnum = JEDEC_ATMEL;
8972                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8973                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8974         }
8975 }
8976
8977 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8978 {
8979         u32 nvcfg1;
8980
8981         nvcfg1 = tr32(NVRAM_CFG1);
8982
8983         /* NVRAM protection for TPM */
8984         if (nvcfg1 & (1 << 27))
8985                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8986
8987         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8988                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8989                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8990                         tp->nvram_jedecnum = JEDEC_ATMEL;
8991                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8992                         break;
8993                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8994                         tp->nvram_jedecnum = JEDEC_ATMEL;
8995                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8996                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8997                         break;
8998                 case FLASH_5752VENDOR_ST_M45PE10:
8999                 case FLASH_5752VENDOR_ST_M45PE20:
9000                 case FLASH_5752VENDOR_ST_M45PE40:
9001                         tp->nvram_jedecnum = JEDEC_ST;
9002                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9003                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9004                         break;
9005         }
9006
9007         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9008                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9009                         case FLASH_5752PAGE_SIZE_256:
9010                                 tp->nvram_pagesize = 256;
9011                                 break;
9012                         case FLASH_5752PAGE_SIZE_512:
9013                                 tp->nvram_pagesize = 512;
9014                                 break;
9015                         case FLASH_5752PAGE_SIZE_1K:
9016                                 tp->nvram_pagesize = 1024;
9017                                 break;
9018                         case FLASH_5752PAGE_SIZE_2K:
9019                                 tp->nvram_pagesize = 2048;
9020                                 break;
9021                         case FLASH_5752PAGE_SIZE_4K:
9022                                 tp->nvram_pagesize = 4096;
9023                                 break;
9024                         case FLASH_5752PAGE_SIZE_264:
9025                                 tp->nvram_pagesize = 264;
9026                                 break;
9027                 }
9028         }
9029         else {
9030                 /* For eeprom, set pagesize to maximum eeprom size */
9031                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9032
9033                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9034                 tw32(NVRAM_CFG1, nvcfg1);
9035         }
9036 }
9037
9038 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9039 {
9040         u32 nvcfg1;
9041
9042         nvcfg1 = tr32(NVRAM_CFG1);
9043
9044         /* NVRAM protection for TPM */
9045         if (nvcfg1 & (1 << 27))
9046                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9047
9048         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9049                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9050                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9051                         tp->nvram_jedecnum = JEDEC_ATMEL;
9052                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9053                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9054
9055                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9056                         tw32(NVRAM_CFG1, nvcfg1);
9057                         break;
9058                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9059                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9060                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9061                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9062                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9063                         tp->nvram_jedecnum = JEDEC_ATMEL;
9064                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9065                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9066                         tp->nvram_pagesize = 264;
9067                         break;
9068                 case FLASH_5752VENDOR_ST_M45PE10:
9069                 case FLASH_5752VENDOR_ST_M45PE20:
9070                 case FLASH_5752VENDOR_ST_M45PE40:
9071                         tp->nvram_jedecnum = JEDEC_ST;
9072                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9073                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9074                         tp->nvram_pagesize = 256;
9075                         break;
9076         }
9077 }
9078
9079 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9080 {
9081         u32 nvcfg1;
9082
9083         nvcfg1 = tr32(NVRAM_CFG1);
9084
9085         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9086                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9087                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9088                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9089                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9090                         tp->nvram_jedecnum = JEDEC_ATMEL;
9091                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9092                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9093
9094                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9095                         tw32(NVRAM_CFG1, nvcfg1);
9096                         break;
9097                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9098                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9099                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9100                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9101                         tp->nvram_jedecnum = JEDEC_ATMEL;
9102                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9103                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9104                         tp->nvram_pagesize = 264;
9105                         break;
9106                 case FLASH_5752VENDOR_ST_M45PE10:
9107                 case FLASH_5752VENDOR_ST_M45PE20:
9108                 case FLASH_5752VENDOR_ST_M45PE40:
9109                         tp->nvram_jedecnum = JEDEC_ST;
9110                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9111                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9112                         tp->nvram_pagesize = 256;
9113                         break;
9114         }
9115 }
9116
9117 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9118 static void __devinit tg3_nvram_init(struct tg3 *tp)
9119 {
9120         int j;
9121
9122         tw32_f(GRC_EEPROM_ADDR,
9123              (EEPROM_ADDR_FSM_RESET |
9124               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9125                EEPROM_ADDR_CLKPERD_SHIFT)));
9126
9127         /* XXX schedule_timeout() ... */
9128         for (j = 0; j < 100; j++)
9129                 udelay(10);
9130
9131         /* Enable seeprom accesses. */
9132         tw32_f(GRC_LOCAL_CTRL,
9133              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9134         udelay(100);
9135
9136         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9137             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9138                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9139
9140                 if (tg3_nvram_lock(tp)) {
9141                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9142                                "tg3_nvram_init failed.\n", tp->dev->name);
9143                         return;
9144                 }
9145                 tg3_enable_nvram_access(tp);
9146
9147                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9148                         tg3_get_5752_nvram_info(tp);
9149                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9150                         tg3_get_5755_nvram_info(tp);
9151                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9152                         tg3_get_5787_nvram_info(tp);
9153                 else
9154                         tg3_get_nvram_info(tp);
9155
9156                 tg3_get_nvram_size(tp);
9157
9158                 tg3_disable_nvram_access(tp);
9159                 tg3_nvram_unlock(tp);
9160
9161         } else {
9162                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9163
9164                 tg3_get_eeprom_size(tp);
9165         }
9166 }
9167
9168 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9169                                         u32 offset, u32 *val)
9170 {
9171         u32 tmp;
9172         int i;
9173
9174         if (offset > EEPROM_ADDR_ADDR_MASK ||
9175             (offset % 4) != 0)
9176                 return -EINVAL;
9177
9178         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9179                                         EEPROM_ADDR_DEVID_MASK |
9180                                         EEPROM_ADDR_READ);
9181         tw32(GRC_EEPROM_ADDR,
9182              tmp |
9183              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9184              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9185               EEPROM_ADDR_ADDR_MASK) |
9186              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9187
9188         for (i = 0; i < 10000; i++) {
9189                 tmp = tr32(GRC_EEPROM_ADDR);
9190
9191                 if (tmp & EEPROM_ADDR_COMPLETE)
9192                         break;
9193                 udelay(100);
9194         }
9195         if (!(tmp & EEPROM_ADDR_COMPLETE))
9196                 return -EBUSY;
9197
9198         *val = tr32(GRC_EEPROM_DATA);
9199         return 0;
9200 }
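
/*
 * Editorial note: the completion poll above retries up to 10000 times with
 * a udelay(100) per pass, so a stuck EEPROM access is abandoned with
 * -EBUSY after roughly 10000 * 100us = 1 second of busy-waiting.
 */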
9201
9202 #define NVRAM_CMD_TIMEOUT 10000
9203
9204 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9205 {
9206         int i;
9207
9208         tw32(NVRAM_CMD, nvram_cmd);
9209         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9210                 udelay(10);
9211                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9212                         udelay(10);
9213                         break;
9214                 }
9215         }
9216         if (i == NVRAM_CMD_TIMEOUT) {
9217                 return -EBUSY;
9218         }
9219         return 0;
9220 }
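
/*
 * Editorial note: NVRAM_CMD_TIMEOUT passes of udelay(10) bound the poll in
 * tg3_nvram_exec_cmd() to roughly 10000 * 10us = 100ms before the command
 * is given up on with -EBUSY.
 */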
9221
9222 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9223 {
9224         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9225             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9226             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9227             (tp->nvram_jedecnum == JEDEC_ATMEL))
9228
9229                 addr = ((addr / tp->nvram_pagesize) <<
9230                         ATMEL_AT45DB0X1B_PAGE_POS) +
9231                        (addr % tp->nvram_pagesize);
9232
9233         return addr;
9234 }
9235
9236 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9237 {
9238         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9239             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9240             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9241             (tp->nvram_jedecnum == JEDEC_ATMEL))
9242
9243                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9244                         tp->nvram_pagesize) +
9245                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9246
9247         return addr;
9248 }
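
/*
 * Editorial example (not from the original source): for a buffered Atmel
 * flash with 264-byte pages, a linear offset of 530 lies in page
 * 530 / 264 = 2 at byte 530 % 264 = 2, so tg3_nvram_phys_addr() returns
 * (2 << ATMEL_AT45DB0X1B_PAGE_POS) + 2 and tg3_nvram_logical_addr() maps
 * that value back to 2 * 264 + 2 = 530.  The two helpers are effectively
 * inverses of each other, as the compiled-out sketch below would check.
 */
#if 0
static void tg3_nvram_addr_selftest(struct tg3 *tp)
{
	u32 off = 530;

	/* Round-tripping a linear offset must give the same offset back. */
	WARN_ON(tg3_nvram_logical_addr(tp,
				       tg3_nvram_phys_addr(tp, off)) != off);
}
#endif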
9249
9250 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9251 {
9252         int ret;
9253
9254         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9255                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9256
9257         offset = tg3_nvram_phys_addr(tp, offset);
9258
9259         if (offset > NVRAM_ADDR_MSK)
9260                 return -EINVAL;
9261
9262         ret = tg3_nvram_lock(tp);
9263         if (ret)
9264                 return ret;
9265
9266         tg3_enable_nvram_access(tp);
9267
9268         tw32(NVRAM_ADDR, offset);
9269         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9270                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9271
9272         if (ret == 0)
9273                 *val = swab32(tr32(NVRAM_RDDATA));
9274
9275         tg3_disable_nvram_access(tp);
9276
9277         tg3_nvram_unlock(tp);
9278
9279         return ret;
9280 }
9281
9282 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9283 {
9284         int err;
9285         u32 tmp;
9286
9287         err = tg3_nvram_read(tp, offset, &tmp);
9288         *val = swab32(tmp);
9289         return err;
9290 }
9291
9292 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9293                                     u32 offset, u32 len, u8 *buf)
9294 {
9295         int i, j, rc = 0;
9296         u32 val;
9297
9298         for (i = 0; i < len; i += 4) {
9299                 u32 addr, data;
9300
9301                 addr = offset + i;
9302
9303                 memcpy(&data, buf + i, 4);
9304
9305                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9306
9307                 val = tr32(GRC_EEPROM_ADDR);
9308                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9309
9310                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9311                         EEPROM_ADDR_READ);
9312                 tw32(GRC_EEPROM_ADDR, val |
9313                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9314                         (addr & EEPROM_ADDR_ADDR_MASK) |
9315                         EEPROM_ADDR_START |
9316                         EEPROM_ADDR_WRITE);
9317
9318                 for (j = 0; j < 10000; j++) {
9319                         val = tr32(GRC_EEPROM_ADDR);
9320
9321                         if (val & EEPROM_ADDR_COMPLETE)
9322                                 break;
9323                         udelay(100);
9324                 }
9325                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9326                         rc = -EBUSY;
9327                         break;
9328                 }
9329         }
9330
9331         return rc;
9332 }
9333
9334 /* offset and length are dword aligned */
9335 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9336                 u8 *buf)
9337 {
9338         int ret = 0;
9339         u32 pagesize = tp->nvram_pagesize;
9340         u32 pagemask = pagesize - 1;
9341         u32 nvram_cmd;
9342         u8 *tmp;
9343
9344         tmp = kmalloc(pagesize, GFP_KERNEL);
9345         if (tmp == NULL)
9346                 return -ENOMEM;
9347
9348         while (len) {
9349                 int j;
9350                 u32 phy_addr, page_off, size;
9351
9352                 phy_addr = offset & ~pagemask;
9353
9354                 for (j = 0; j < pagesize; j += 4) {
9355                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9356                                                 (u32 *) (tmp + j))))
9357                                 break;
9358                 }
9359                 if (ret)
9360                         break;
9361
9362                 page_off = offset & pagemask;
9363                 size = pagesize;
9364                 if (len < size)
9365                         size = len;
9366
9367                 len -= size;
9368
9369                 memcpy(tmp + page_off, buf, size);
9370
9371                 offset = offset + (pagesize - page_off);
9372
9373                 tg3_enable_nvram_access(tp);
9374
9375                 /*
9376                  * Before we can erase the flash page, we need
9377                  * to issue a special "write enable" command.
9378                  */
9379                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9380
9381                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9382                         break;
9383
9384                 /* Erase the target page */
9385                 tw32(NVRAM_ADDR, phy_addr);
9386
9387                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9388                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9389
9390                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9391                         break;
9392
9393                 /* Issue another write enable to start the write. */
9394                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9395
9396                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9397                         break;
9398
9399                 for (j = 0; j < pagesize; j += 4) {
9400                         u32 data;
9401
9402                         data = *((u32 *) (tmp + j));
9403                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9404
9405                         tw32(NVRAM_ADDR, phy_addr + j);
9406
9407                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9408                                 NVRAM_CMD_WR;
9409
9410                         if (j == 0)
9411                                 nvram_cmd |= NVRAM_CMD_FIRST;
9412                         else if (j == (pagesize - 4))
9413                                 nvram_cmd |= NVRAM_CMD_LAST;
9414
9415                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9416                                 break;
9417                 }
9418                 if (ret)
9419                         break;
9420         }
9421
9422         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9423         tg3_nvram_exec_cmd(tp, nvram_cmd);
9424
9425         kfree(tmp);
9426
9427         return ret;
9428 }
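
/*
 * Editorial summary of the unbuffered-flash path above: each affected page
 * is (1) read in full into a bounce buffer, (2) patched with the caller's
 * bytes, (3) write-enabled and erased, and (4) write-enabled again and
 * reprogrammed one dword at a time, with NVRAM_CMD_FIRST/NVRAM_CMD_LAST
 * marking the page boundaries; the part is then returned to the
 * write-disabled state with NVRAM_CMD_WRDI.
 */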
9429
9430 /* offset and length are dword aligned */
9431 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9432                 u8 *buf)
9433 {
9434         int i, ret = 0;
9435
9436         for (i = 0; i < len; i += 4, offset += 4) {
9437                 u32 data, page_off, phy_addr, nvram_cmd;
9438
9439                 memcpy(&data, buf + i, 4);
9440                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9441
9442                 page_off = offset % tp->nvram_pagesize;
9443
9444                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9445
9446                 tw32(NVRAM_ADDR, phy_addr);
9447
9448                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9449
9450                 if ((page_off == 0) || (i == 0))
9451                         nvram_cmd |= NVRAM_CMD_FIRST;
9452                 if (page_off == (tp->nvram_pagesize - 4))
9453                         nvram_cmd |= NVRAM_CMD_LAST;
9454
9455                 if (i == (len - 4))
9456                         nvram_cmd |= NVRAM_CMD_LAST;
9457
9458                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9459                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9460                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9461                     (tp->nvram_jedecnum == JEDEC_ST) &&
9462                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9463
9464                         if ((ret = tg3_nvram_exec_cmd(tp,
9465                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9466                                 NVRAM_CMD_DONE)))
9467
9468                                 break;
9469                 }
9470                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9471                         /* We always do complete word writes to eeprom. */
9472                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9473                 }
9474
9475                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9476                         break;
9477         }
9478         return ret;
9479 }
9480
9481 /* offset and length are dword aligned */
9482 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9483 {
9484         int ret;
9485
9486         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9487                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9488                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9489                 udelay(40);
9490         }
9491
9492         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9493                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9494         }
9495         else {
9496                 u32 grc_mode;
9497
9498                 ret = tg3_nvram_lock(tp);
9499                 if (ret)
9500                         return ret;
9501
9502                 tg3_enable_nvram_access(tp);
9503                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9504                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9505                         tw32(NVRAM_WRITE1, 0x406);
9506
9507                 grc_mode = tr32(GRC_MODE);
9508                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9509
9510                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9511                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9512
9513                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9514                                 buf);
9515                 }
9516                 else {
9517                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9518                                 buf);
9519                 }
9520
9521                 grc_mode = tr32(GRC_MODE);
9522                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9523
9524                 tg3_disable_nvram_access(tp);
9525                 tg3_nvram_unlock(tp);
9526         }
9527
9528         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9529                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9530                 udelay(40);
9531         }
9532
9533         return ret;
9534 }
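
/*
 * Editorial sketch (hypothetical helper, not part of the driver): a minimal
 * caller of tg3_nvram_write_block().  Offset and length must be dword
 * aligned; the routine above chooses the EEPROM, buffered-flash or
 * unbuffered-flash path itself and handles the write-protect GPIO.
 */
#if 0
static int tg3_nvram_write_dword(struct tg3 *tp, u32 offset, u32 word)
{
	u8 buf[4];

	memcpy(buf, &word, sizeof(word));
	return tg3_nvram_write_block(tp, offset, sizeof(buf), buf);
}
#endif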
9535
9536 struct subsys_tbl_ent {
9537         u16 subsys_vendor, subsys_devid;
9538         u32 phy_id;
9539 };
9540
9541 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9542         /* Broadcom boards. */
9543         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9544         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9545         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9546         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9547         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9548         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9549         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9550         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9551         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9552         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9553         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9554
9555         /* 3com boards. */
9556         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9557         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9558         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9559         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9560         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9561
9562         /* DELL boards. */
9563         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9564         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9565         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9566         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9567
9568         /* Compaq boards. */
9569         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9570         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9571         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9572         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9573         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9574
9575         /* IBM boards. */
9576         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9577 };
9578
9579 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9580 {
9581         int i;
9582
9583         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9584                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9585                      tp->pdev->subsystem_vendor) &&
9586                     (subsys_id_to_phy_id[i].subsys_devid ==
9587                      tp->pdev->subsystem_device))
9588                         return &subsys_id_to_phy_id[i];
9589         }
9590         return NULL;
9591 }
9592
9593 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9594 {
9595         u32 val;
9596         u16 pmcsr;
9597
9598         /* On some early chips the SRAM cannot be accessed in D3hot state,
9599          * so we need to make sure we're in D0.
9600          */
9601         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9602         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9603         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9604         msleep(1);
9605
9606         /* Make sure register accesses (indirect or otherwise)
9607          * will function correctly.
9608          */
9609         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9610                                tp->misc_host_ctrl);
9611
9612         /* The memory arbiter has to be enabled in order for SRAM accesses
9613          * to succeed.  Normally on powerup the tg3 chip firmware will make
9614          * sure it is enabled, but other entities such as system netboot
9615          * code might disable it.
9616          */
9617         val = tr32(MEMARB_MODE);
9618         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9619
9620         tp->phy_id = PHY_ID_INVALID;
9621         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9622
9623         /* Assume an onboard device by default.  */
9624         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9625
9626         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9627         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9628                 u32 nic_cfg, led_cfg;
9629                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9630                 int eeprom_phy_serdes = 0;
9631
9632                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9633                 tp->nic_sram_data_cfg = nic_cfg;
9634
9635                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9636                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9637                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9638                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9639                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9640                     (ver > 0) && (ver < 0x100))
9641                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9642
9643                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9644                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9645                         eeprom_phy_serdes = 1;
9646
9647                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9648                 if (nic_phy_id != 0) {
9649                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9650                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9651
9652                         eeprom_phy_id  = (id1 >> 16) << 10;
9653                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9654                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9655                 } else
9656                         eeprom_phy_id = 0;
9657
9658                 tp->phy_id = eeprom_phy_id;
9659                 if (eeprom_phy_serdes) {
9660                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9661                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9662                         else
9663                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9664                 }
9665
9666                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9667                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9668                                     SHASTA_EXT_LED_MODE_MASK);
9669                 else
9670                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9671
9672                 switch (led_cfg) {
9673                 default:
9674                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9675                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9676                         break;
9677
9678                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9679                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9680                         break;
9681
9682                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9683                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9684
9685                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9686                          * read on some older 5700/5701 bootcode.
9687                          */
9688                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9689                             ASIC_REV_5700 ||
9690                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9691                             ASIC_REV_5701)
9692                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9693
9694                         break;
9695
9696                 case SHASTA_EXT_LED_SHARED:
9697                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9698                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9699                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9700                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9701                                                  LED_CTRL_MODE_PHY_2);
9702                         break;
9703
9704                 case SHASTA_EXT_LED_MAC:
9705                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9706                         break;
9707
9708                 case SHASTA_EXT_LED_COMBO:
9709                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9710                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9711                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9712                                                  LED_CTRL_MODE_PHY_2);
9713                         break;
9714
9715                 }
9716
9717                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9718                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9719                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9720                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9721
9722                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9723                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9724                 else
9725                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9726
9727                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9728                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9729                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9730                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9731                 }
9732                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9733                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9734
9735                 if (cfg2 & (1 << 17))
9736                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9737
9738                 /* Serdes signal pre-emphasis in register 0x590 is set
9739                  * by the bootcode if bit 18 is set. */
9740                 if (cfg2 & (1 << 18))
9741                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9742         }
9743 }
9744
9745 static int __devinit tg3_phy_probe(struct tg3 *tp)
9746 {
9747         u32 hw_phy_id_1, hw_phy_id_2;
9748         u32 hw_phy_id, hw_phy_id_masked;
9749         int err;
9750
9751         /* Reading the PHY ID register can conflict with ASF
9752          * firmware access to the PHY hardware.
9753          */
9754         err = 0;
9755         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9756                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9757         } else {
9758                 /* Now read the physical PHY_ID from the chip and verify
9759                  * that it is sane.  If it doesn't look good, we fall back
9760                  * to either the hard-coded table based PHY_ID or, failing
9761                  * that, the value found in the eeprom area.
9762                  */
9763                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9764                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9765
9766                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9767                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9768                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9769
9770                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9771         }
9772
9773         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9774                 tp->phy_id = hw_phy_id;
9775                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9776                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9777                 else
9778                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9779         } else {
9780                 if (tp->phy_id != PHY_ID_INVALID) {
9781                         /* Do nothing, phy ID already set up in
9782                          * tg3_get_eeprom_hw_cfg().
9783                          */
9784                 } else {
9785                         struct subsys_tbl_ent *p;
9786
9787                         /* No eeprom signature?  Try the hardcoded
9788                          * subsys device table.
9789                          */
9790                         p = lookup_by_subsys(tp);
9791                         if (!p)
9792                                 return -ENODEV;
9793
9794                         tp->phy_id = p->phy_id;
9795                         if (!tp->phy_id ||
9796                             tp->phy_id == PHY_ID_BCM8002)
9797                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9798                 }
9799         }
9800
9801         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9802             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9803                 u32 bmsr, adv_reg, tg3_ctrl;
9804
9805                 tg3_readphy(tp, MII_BMSR, &bmsr);
9806                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9807                     (bmsr & BMSR_LSTATUS))
9808                         goto skip_phy_reset;
9809
9810                 err = tg3_phy_reset(tp);
9811                 if (err)
9812                         return err;
9813
9814                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9815                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9816                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9817                 tg3_ctrl = 0;
9818                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9819                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9820                                     MII_TG3_CTRL_ADV_1000_FULL);
9821                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9822                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9823                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9824                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9825                 }
9826
9827                 if (!tg3_copper_is_advertising_all(tp)) {
9828                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9829
9830                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9831                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9832
9833                         tg3_writephy(tp, MII_BMCR,
9834                                      BMCR_ANENABLE | BMCR_ANRESTART);
9835                 }
9836                 tg3_phy_set_wirespeed(tp);
9837
9838                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9839                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9840                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9841         }
9842
9843 skip_phy_reset:
9844         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9845                 err = tg3_init_5401phy_dsp(tp);
9846                 if (err)
9847                         return err;
9848         }
9849
9850         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9851                 err = tg3_init_5401phy_dsp(tp);
9852         }
9853
9854         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9855                 tp->link_config.advertising =
9856                         (ADVERTISED_1000baseT_Half |
9857                          ADVERTISED_1000baseT_Full |
9858                          ADVERTISED_Autoneg |
9859                          ADVERTISED_FIBRE);
9860         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9861                 tp->link_config.advertising &=
9862                         ~(ADVERTISED_1000baseT_Half |
9863                           ADVERTISED_1000baseT_Full);
9864
9865         return err;
9866 }
9867
9868 static void __devinit tg3_read_partno(struct tg3 *tp)
9869 {
9870         unsigned char vpd_data[256];
9871         int i;
9872         u32 magic;
9873
9874         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9875                 goto out_not_found;
9876
9877         if (magic == TG3_EEPROM_MAGIC) {
9878                 for (i = 0; i < 256; i += 4) {
9879                         u32 tmp;
9880
9881                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9882                                 goto out_not_found;
9883
9884                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9885                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9886                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9887                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9888                 }
9889         } else {
9890                 int vpd_cap;
9891
9892                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9893                 for (i = 0; i < 256; i += 4) {
9894                         u32 tmp, j = 0;
9895                         u16 tmp16;
9896
9897                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9898                                               i);
9899                         while (j++ < 100) {
9900                                 pci_read_config_word(tp->pdev, vpd_cap +
9901                                                      PCI_VPD_ADDR, &tmp16);
9902                                 if (tmp16 & 0x8000)
9903                                         break;
9904                                 msleep(1);
9905                         }
9906                         if (!(tmp16 & 0x8000))
9907                                 goto out_not_found;
9908
9909                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9910                                               &tmp);
9911                         tmp = cpu_to_le32(tmp);
9912                         memcpy(&vpd_data[i], &tmp, 4);
9913                 }
9914         }
9915
9916         /* Now parse and find the part number. */
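        /* Editorial note on the layout walked below: VPD is a sequence of
         * large-resource tags.  Tag 0x82 is the identifier string and
         * 0x90/0x91 are the read-only/read-write data blocks; each tag is
         * followed by a 16-bit little-endian length, hence the
         * vpd_data[i + 1] + (vpd_data[i + 2] << 8) arithmetic below.
         * Within the 0x90 block each keyword ("PN" here) is two ASCII
         * characters followed by a one-byte data length and the data.
         */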
9917         for (i = 0; i < 256; ) {
9918                 unsigned char val = vpd_data[i];
9919                 int block_end;
9920
9921                 if (val == 0x82 || val == 0x91) {
9922                         i = (i + 3 +
9923                              (vpd_data[i + 1] +
9924                               (vpd_data[i + 2] << 8)));
9925                         continue;
9926                 }
9927
9928                 if (val != 0x90)
9929                         goto out_not_found;
9930
9931                 block_end = (i + 3 +
9932                              (vpd_data[i + 1] +
9933                               (vpd_data[i + 2] << 8)));
9934                 i += 3;
9935                 while (i < block_end) {
9936                         if (vpd_data[i + 0] == 'P' &&
9937                             vpd_data[i + 1] == 'N') {
9938                                 int partno_len = vpd_data[i + 2];
9939
9940                                 if (partno_len > 24)
9941                                         goto out_not_found;
9942
9943                                 memcpy(tp->board_part_number,
9944                                        &vpd_data[i + 3],
9945                                        partno_len);
9946
9947                                 /* Success. */
9948                                 return;
9949                         }
                        /* If this keyword was not "PN", advance past it
                         * (two key bytes, a length byte, then the data)
                         * so the scan can terminate instead of spinning.
                         */
                        i += 3 + vpd_data[i + 2];
9950                 }
9951
9952                 /* Part number not found. */
9953                 goto out_not_found;
9954         }
9955
9956 out_not_found:
9957         strcpy(tp->board_part_number, "none");
9958 }
9959
9960 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9961 {
9962         u32 val, offset, start;
9963
9964         if (tg3_nvram_read_swab(tp, 0, &val))
9965                 return;
9966
9967         if (val != TG3_EEPROM_MAGIC)
9968                 return;
9969
9970         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9971             tg3_nvram_read_swab(tp, 0x4, &start))
9972                 return;
9973
9974         offset = tg3_nvram_logical_addr(tp, offset);
9975         if (tg3_nvram_read_swab(tp, offset, &val))
9976                 return;
9977
9978         if ((val & 0xfc000000) == 0x0c000000) {
9979                 u32 ver_offset, addr;
9980                 int i;
9981
9982                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9983                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9984                         return;
9985
9986                 if (val != 0)
9987                         return;
9988
9989                 addr = offset + ver_offset - start;
9990                 for (i = 0; i < 16; i += 4) {
9991                         if (tg3_nvram_read(tp, addr + i, &val))
9992                                 return;
9993
9994                         val = cpu_to_le32(val);
9995                         memcpy(tp->fw_ver + i, &val, 4);
9996                 }
9997         }
9998 }
9999
10000 static int __devinit tg3_get_invariants(struct tg3 *tp)
10001 {
10002         static struct pci_device_id write_reorder_chipsets[] = {
10003                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10004                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10005                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10006                              PCI_DEVICE_ID_VIA_8385_0) },
10007                 { },
10008         };
10009         u32 misc_ctrl_reg;
10010         u32 cacheline_sz_reg;
10011         u32 pci_state_reg, grc_misc_cfg;
10012         u32 val;
10013         u16 pci_cmd;
10014         int err;
10015
10016         /* Force memory write invalidate off.  If we leave it on,
10017          * then on 5700_BX chips we have to enable a workaround.
10018          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10019          * to match the cacheline size.  The Broadcom driver has this
10020          * workaround but turns MWI off all the time so it never uses
10021          * it.  This seems to suggest that the workaround is insufficient.
10022          */
10023         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10024         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10025         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10026
10027         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10028          * has the register indirect write enable bit set before
10029          * we try to access any of the MMIO registers.  It is also
10030          * critical that the PCI-X hw workaround situation is decided
10031          * before that as well.
10032          */
10033         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10034                               &misc_ctrl_reg);
10035
10036         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10037                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10038
10039         /* Wrong chip ID in 5752 A0. This code can be removed later
10040          * as A0 is not in production.
10041          */
10042         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10043                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10044
10045         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10046          * we need to disable memory and use config. cycles
10047          * only to access all registers. The 5702/03 chips
10048          * can mistakenly decode the special cycles from the
10049          * ICH chipsets as memory write cycles, causing corruption
10050          * of register and memory space. Only certain ICH bridges
10051          * will drive special cycles with non-zero data during the
10052          * address phase which can fall within the 5703's address
10053          * range. This is not an ICH bug as the PCI spec allows
10054          * non-zero address during special cycles. However, only
10055          * these ICH bridges are known to drive non-zero addresses
10056          * during special cycles.
10057          *
10058          * Since special cycles do not cross PCI bridges, we only
10059          * enable this workaround if the 5703 is on the secondary
10060          * bus of these ICH bridges.
10061          */
10062         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10063             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10064                 static struct tg3_dev_id {
10065                         u32     vendor;
10066                         u32     device;
10067                         u32     rev;
10068                 } ich_chipsets[] = {
10069                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10070                           PCI_ANY_ID },
10071                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10072                           PCI_ANY_ID },
10073                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10074                           0xa },
10075                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10076                           PCI_ANY_ID },
10077                         { },
10078                 };
10079                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10080                 struct pci_dev *bridge = NULL;
10081
10082                 while (pci_id->vendor != 0) {
10083                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10084                                                 bridge);
10085                         if (!bridge) {
10086                                 pci_id++;
10087                                 continue;
10088                         }
10089                         if (pci_id->rev != PCI_ANY_ID) {
10090                                 u8 rev;
10091
10092                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10093                                                      &rev);
10094                                 if (rev > pci_id->rev)
10095                                         continue;
10096                         }
10097                         if (bridge->subordinate &&
10098                             (bridge->subordinate->number ==
10099                              tp->pdev->bus->number)) {
10100
10101                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10102                                 pci_dev_put(bridge);
10103                                 break;
10104                         }
10105                 }
10106         }
10107
10108         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10109          * DMA addresses > 40-bit. This bridge may have additional
10110          * 57xx devices behind it in some 4-port NIC designs, for example.
10111          * Any tg3 device found behind the bridge will also need the 40-bit
10112          * DMA workaround.
10113          */
10114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10115             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10116                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10117                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10118                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10119         }
10120         else {
10121                 struct pci_dev *bridge = NULL;
10122
10123                 do {
10124                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10125                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10126                                                 bridge);
10127                         if (bridge && bridge->subordinate &&
10128                             (bridge->subordinate->number <=
10129                              tp->pdev->bus->number) &&
10130                             (bridge->subordinate->subordinate >=
10131                              tp->pdev->bus->number)) {
10132                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10133                                 pci_dev_put(bridge);
10134                                 break;
10135                         }
10136                 } while (bridge);
10137         }
10138
10139         /* Initialize misc host control in PCI block. */
10140         tp->misc_host_ctrl |= (misc_ctrl_reg &
10141                                MISC_HOST_CTRL_CHIPREV);
10142         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10143                                tp->misc_host_ctrl);
10144
10145         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10146                               &cacheline_sz_reg);
10147
10148         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10149         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10150         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10151         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10152
10153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10154             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10155             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10156             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10157             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10158                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10159
10160         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10161             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10162                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10163
10164         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10165                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10166                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10167                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10168                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10169                 } else
10170                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10171         }
10172
10173         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10174             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10175             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10176             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10177             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10178                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10179
10180         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10181                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10182
10183         /* If we have an AMD 762 or VIA K8T800 chipset, write
10184          * reordering to the mailbox registers done by the host
10185          * controller can cause major troubles.  We read back from
10186          * every mailbox register write to force the writes to be
10187          * posted to the chip in order.
10188          */
10189         if (pci_dev_present(write_reorder_chipsets) &&
10190             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10191                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
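        /* Editorial sketch of the read-back technique referred to above,
         * assuming a memory-mapped register; the read forces the posted
         * write out to the device before the CPU moves on:
         *
         *	writel(val, mbox);
         *	readl(mbox);
         *
         * which is essentially what the tg3_write_flush_reg32() accessor
         * installed further below does.
         */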
10192
10193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10194             tp->pci_lat_timer < 64) {
10195                 tp->pci_lat_timer = 64;
10196
10197                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10198                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10199                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10200                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10201
10202                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10203                                        cacheline_sz_reg);
10204         }
10205
10206         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10207                               &pci_state_reg);
10208
10209         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10210                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10211
10212                 /* If this is a 5700 BX chipset, and we are in PCI-X
10213                  * mode, enable register write workaround.
10214                  *
10215                  * The workaround is to use indirect register accesses
10216                  * for all chip writes not to mailbox registers.
10217                  */
10218                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10219                         u32 pm_reg;
10220                         u16 pci_cmd;
10221
10222                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10223
10224                          /* The chip can have its power management PCI config
10225                          * space registers clobbered due to this bug.
10226                          * So explicitly force the chip into D0 here.
10227                          */
10228                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10229                                               &pm_reg);
10230                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10231                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10232                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10233                                                pm_reg);
10234
10235                         /* Also, force SERR#/PERR# in PCI command. */
10236                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10237                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10238                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10239                 }
10240         }
10241
10242         /* 5700 BX chips need to have their TX producer index mailboxes
10243          * written twice to workaround a bug.
10244          */
10245         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10246                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10247
10248         /* Back to back register writes can cause problems on this chip,
10249          * the workaround is to read back all reg writes except those to
10250          * mailbox regs.  See tg3_write_indirect_reg32().
10251          *
10252          * PCI Express 5750_A0 rev chips need this workaround too.
10253          */
10254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10255             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10256              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10257                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10258
10259         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10260                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10261         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10262                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10263
10264         /* Chip-specific fixup from Broadcom driver */
10265         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10266             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10267                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10268                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10269         }
10270
10271         /* Default fast path register access methods */
10272         tp->read32 = tg3_read32;
10273         tp->write32 = tg3_write32;
10274         tp->read32_mbox = tg3_read32;
10275         tp->write32_mbox = tg3_write32;
10276         tp->write32_tx_mbox = tg3_write32;
10277         tp->write32_rx_mbox = tg3_write32;
10278
10279         /* Various workaround register access methods */
10280         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10281                 tp->write32 = tg3_write_indirect_reg32;
10282         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10283                 tp->write32 = tg3_write_flush_reg32;
10284
10285         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10286             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10287                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10288                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10289                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10290         }
10291
10292         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10293                 tp->read32 = tg3_read_indirect_reg32;
10294                 tp->write32 = tg3_write_indirect_reg32;
10295                 tp->read32_mbox = tg3_read_indirect_mbox;
10296                 tp->write32_mbox = tg3_write_indirect_mbox;
10297                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10298                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10299
10300                 iounmap(tp->regs);
10301                 tp->regs = NULL;
10302
10303                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10304                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10305                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10306         }
10307
10308         if (tp->write32 == tg3_write_indirect_reg32 ||
10309             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10310              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10311               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10312                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10313
10314         /* Get eeprom hw config before calling tg3_set_power_state().
10315          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10316          * determined before calling tg3_set_power_state() so that
10317          * we know whether or not to switch out of Vaux power.
10318          * When the flag is set, it means that GPIO1 is used for eeprom
10319          * write protect and also implies that it is a LOM where GPIOs
10320          * are not used to switch power.
10321          */ 
10322         tg3_get_eeprom_hw_cfg(tp);
10323
10324         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10325          * GPIO1 driven high will bring 5700's external PHY out of reset.
10326          * It is also used as eeprom write protect on LOMs.
10327          */
10328         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10329         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10330             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10331                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10332                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10333         /* Unused GPIO3 must be driven as output on 5752 because there
10334          * are no pull-up resistors on unused GPIO pins.
10335          */
10336         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10337                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10338
10339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10340                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10341
10342         /* Force the chip into D0. */
10343         err = tg3_set_power_state(tp, PCI_D0);
10344         if (err) {
10345                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10346                        pci_name(tp->pdev));
10347                 return err;
10348         }
10349
10350         /* 5700 B0 chips do not support checksumming correctly due
10351          * to hardware bugs.
10352          */
10353         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10354                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10355
10356         /* Derive initial jumbo mode from MTU assigned in
10357          * ether_setup() via the alloc_etherdev() call
10358          */
10359         if (tp->dev->mtu > ETH_DATA_LEN &&
10360             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10361                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10362
10363         /* Determine WakeOnLan speed to use. */
10364         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10365             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10366             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10367             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10368                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10369         } else {
10370                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10371         }
10372
10373         /* A few boards don't want Ethernet@WireSpeed phy feature */
10374         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10375             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10376              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10377              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10378             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10379                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10380
10381         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10382             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10383                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10384         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10385                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10386
10387         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10388                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10389                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10390                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10391                 else
10392                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10393         }
10394
10395         tp->coalesce_mode = 0;
10396         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10397             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10398                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10399
10400         /* Initialize MAC MI mode, polling disabled. */
10401         tw32_f(MAC_MI_MODE, tp->mi_mode);
10402         udelay(80);
10403
10404         /* Initialize data/descriptor byte/word swapping. */
10405         val = tr32(GRC_MODE);
10406         val &= GRC_MODE_HOST_STACKUP;
10407         tw32(GRC_MODE, val | tp->grc_mode);
10408
10409         tg3_switch_clocks(tp);
10410
10411         /* Clear this out for sanity. */
10412         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10413
10414         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10415                               &pci_state_reg);
10416         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10417             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10418                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10419
10420                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10421                     chiprevid == CHIPREV_ID_5701_B0 ||
10422                     chiprevid == CHIPREV_ID_5701_B2 ||
10423                     chiprevid == CHIPREV_ID_5701_B5) {
10424                         void __iomem *sram_base;
10425
10426                         /* Write some dummy words into the SRAM status block
10427                          * area, see if it reads back correctly.  If the return
10428                          * value is bad, force enable the PCIX workaround.
10429                          */
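                              /* The writes of zero and then 0xffffffff to offset 4 should
                               * leave offset 0 untouched; if offset 0 no longer reads back
                               * as zero, the part presumably has the PCI-X target bug and
                               * the workaround is forced on below.
                               */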
10430                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10431
10432                         writel(0x00000000, sram_base);
10433                         writel(0x00000000, sram_base + 4);
10434                         writel(0xffffffff, sram_base + 4);
10435                         if (readl(sram_base) != 0x00000000)
10436                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10437                 }
10438         }
10439
10440         udelay(50);
10441         tg3_nvram_init(tp);
10442
10443         grc_misc_cfg = tr32(GRC_MISC_CFG);
10444         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10445
10446         /* Broadcom's driver says that CIOBE multisplit has a bug */
10447 #if 0
10448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10449             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10450                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10451                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10452         }
10453 #endif
10454         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10455             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10456              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10457                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10458
10459         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10460             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10461                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10462         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10463                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10464                                       HOSTCC_MODE_CLRTICK_TXBD);
10465
10466                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10467                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10468                                        tp->misc_host_ctrl);
10469         }
10470
10471         /* these are limited to 10/100 only */
10472         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10473              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10474             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10475              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10476              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10477               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10478               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10479             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10480              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10481               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10482                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10483
10484         err = tg3_phy_probe(tp);
10485         if (err) {
10486                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10487                        pci_name(tp->pdev), err);
10488                 /* ... but do not return immediately ... */
10489         }
10490
10491         tg3_read_partno(tp);
10492         tg3_read_fw_ver(tp);
10493
10494         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10495                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10496         } else {
10497                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10498                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10499                 else
10500                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10501         }
10502
10503         /* 5700 {AX,BX} chips have a broken status block link
10504          * change bit implementation, so we must use the
10505          * status register in those cases.
10506          */
10507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10508                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10509         else
10510                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10511
10512         /* The led_ctrl is set during tg3_phy_probe, here we might
10513          * have to force the link status polling mechanism based
10514          * upon subsystem IDs.
10515          */
10516         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10517             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10518                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10519                                   TG3_FLAG_USE_LINKCHG_REG);
10520         }
10521
10522         /* For all SERDES we poll the MAC status register. */
10523         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10524                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10525         else
10526                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10527
10528         /* All chips other than the 5755 and 5787 can get confused if
10529          * TX buffers straddle the 4GB address boundary in some cases.
10530          */
10531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10533                 tp->dev->hard_start_xmit = tg3_start_xmit;
10534         else
10535                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10536
10537         tp->rx_offset = 2;
10538         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10539             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10540                 tp->rx_offset = 0;
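              /* An offset of 2 keeps the IP header 4-byte aligned behind the
               * 14-byte Ethernet header; the 5701 in PCI-X mode apparently
               * cannot DMA receive data to such an unaligned address, so the
               * offset is left at 0 there.
               */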
10541
10542         /* By default, disable wake-on-lan.  User can change this
10543          * using ETHTOOL_SWOL.
10544          */
10545         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10546
10547         return err;
10548 }
10549
10550 #ifdef CONFIG_SPARC64
10551 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10552 {
10553         struct net_device *dev = tp->dev;
10554         struct pci_dev *pdev = tp->pdev;
10555         struct pcidev_cookie *pcp = pdev->sysdata;
10556
10557         if (pcp != NULL) {
10558                 int node = pcp->prom_node;
10559
10560                 if (prom_getproplen(node, "local-mac-address") == 6) {
10561                         prom_getproperty(node, "local-mac-address",
10562                                          dev->dev_addr, 6);
10563                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10564                         return 0;
10565                 }
10566         }
10567         return -ENODEV;
10568 }
10569
10570 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10571 {
10572         struct net_device *dev = tp->dev;
10573
10574         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10575         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10576         return 0;
10577 }
10578 #endif
10579
10580 static int __devinit tg3_get_device_address(struct tg3 *tp)
10581 {
10582         struct net_device *dev = tp->dev;
10583         u32 hi, lo, mac_offset;
10584         int addr_ok = 0;
10585
10586 #ifdef CONFIG_SPARC64
10587         if (!tg3_get_macaddr_sparc(tp))
10588                 return 0;
10589 #endif
10590
10591         mac_offset = 0x7c;
10592         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10593             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10594                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10595                         mac_offset = 0xcc;
10596                 if (tg3_nvram_lock(tp))
10597                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10598                 else
10599                         tg3_nvram_unlock(tp);
10600         }
10601
10602         /* First try to get it from MAC address mailbox. */
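              /* A high word whose upper 16 bits read 0x484b (ASCII "HK") is
               * taken as a sign that the bootcode populated the mailbox; the
               * first two address bytes then come from the high word and the
               * remaining four from the low word, as unpacked below.
               */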
10603         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10604         if ((hi >> 16) == 0x484b) {
10605                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10606                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10607
10608                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10609                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10610                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10611                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10612                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10613
10614                 /* Some old bootcode may report a 0 MAC address in SRAM */
10615                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10616         }
10617         if (!addr_ok) {
10618                 /* Next, try NVRAM. */
10619                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10620                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10621                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10622                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10623                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10624                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10625                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10626                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10627                 }
10628                 /* Finally just fetch it out of the MAC control regs. */
10629                 else {
10630                         hi = tr32(MAC_ADDR_0_HIGH);
10631                         lo = tr32(MAC_ADDR_0_LOW);
10632
10633                         dev->dev_addr[5] = lo & 0xff;
10634                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10635                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10636                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10637                         dev->dev_addr[1] = hi & 0xff;
10638                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10639                 }
10640         }
10641
10642         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10643 #ifdef CONFIG_SPARC64
10644                 if (!tg3_get_default_macaddr_sparc(tp))
10645                         return 0;
10646 #endif
10647                 return -EINVAL;
10648         }
10649         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10650         return 0;
10651 }
10652
10653 #define BOUNDARY_SINGLE_CACHELINE       1
10654 #define BOUNDARY_MULTI_CACHELINE        2
10655
10656 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10657 {
10658         int cacheline_size;
10659         u8 byte;
10660         int goal;
10661
10662         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10663         if (byte == 0)
10664                 cacheline_size = 1024;
10665         else
10666                 cacheline_size = (int) byte * 4;
10667
10668         /* On 5703 and later chips, the boundary bits have no
10669          * effect.
10670          */
10671         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10672             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10673             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10674                 goto out;
10675
10676 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10677         goal = BOUNDARY_MULTI_CACHELINE;
10678 #else
10679 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10680         goal = BOUNDARY_SINGLE_CACHELINE;
10681 #else
10682         goal = 0;
10683 #endif
10684 #endif
10685
10686         if (!goal)
10687                 goto out;
10688
10689         /* PCI controllers on most RISC systems tend to disconnect
10690          * when a device tries to burst across a cache-line boundary.
10691          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10692          *
10693          * Unfortunately, for PCI-E there are only limited
10694          * write-side controls for this, and thus for reads
10695          * we will still get the disconnects.  We'll also waste
10696          * these PCI cycles for both read and write for chips
10697          * other than 5700 and 5701 which do not implement the
10698          * boundary bits.
10699          */
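              /* For example, on a conventional PCI bus with a 64-byte cache
               * line and a BOUNDARY_SINGLE_CACHELINE goal, the switch below
               * ends up ORing in DMA_RWCTRL_READ_BNDRY_64 |
               * DMA_RWCTRL_WRITE_BNDRY_64.
               */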
10700         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10701             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10702                 switch (cacheline_size) {
10703                 case 16:
10704                 case 32:
10705                 case 64:
10706                 case 128:
10707                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10708                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10709                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10710                         } else {
10711                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10712                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10713                         }
10714                         break;
10715
10716                 case 256:
10717                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10718                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10719                         break;
10720
10721                 default:
10722                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10723                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10724                         break;
10725         }
10726         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10727                 switch (cacheline_size) {
10728                 case 16:
10729                 case 32:
10730                 case 64:
10731                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10732                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10733                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10734                                 break;
10735                         }
10736                         /* fallthrough */
10737                 case 128:
10738                 default:
10739                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10740                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10741                         break;
10742         }
10743         } else {
10744                 switch (cacheline_size) {
10745                 case 16:
10746                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10747                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10748                                         DMA_RWCTRL_WRITE_BNDRY_16);
10749                                 break;
10750                         }
10751                         /* fallthrough */
10752                 case 32:
10753                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10754                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10755                                         DMA_RWCTRL_WRITE_BNDRY_32);
10756                                 break;
10757                         }
10758                         /* fallthrough */
10759                 case 64:
10760                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10761                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10762                                         DMA_RWCTRL_WRITE_BNDRY_64);
10763                                 break;
10764                         }
10765                         /* fallthrough */
10766                 case 128:
10767                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10768                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10769                                         DMA_RWCTRL_WRITE_BNDRY_128);
10770                                 break;
10771                         }
10772                         /* fallthrough */
10773                 case 256:
10774                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10775                                 DMA_RWCTRL_WRITE_BNDRY_256);
10776                         break;
10777                 case 512:
10778                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10779                                 DMA_RWCTRL_WRITE_BNDRY_512);
10780                         break;
10781                 case 1024:
10782                 default:
10783                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10784                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10785                         break;
10786         }
10787         }
10788
10789 out:
10790         return val;
10791 }
10792
10793 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10794 {
10795         struct tg3_internal_buffer_desc test_desc;
10796         u32 sram_dma_descs;
10797         int i, ret;
10798
10799         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10800
10801         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10802         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10803         tw32(RDMAC_STATUS, 0);
10804         tw32(WDMAC_STATUS, 0);
10805
10806         tw32(BUFMGR_MODE, 0);
10807         tw32(FTQ_RESET, 0);
10808
10809         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10810         test_desc.addr_lo = buf_dma & 0xffffffff;
10811         test_desc.nic_mbuf = 0x00002100;
10812         test_desc.len = size;
10813
10814         /*
10815          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10816          * the *second* time the tg3 driver was getting loaded after an
10817          * initial scan.
10818          *
10819          * Broadcom tells me:
10820          *   ...the DMA engine is connected to the GRC block and a DMA
10821          *   reset may affect the GRC block in some unpredictable way...
10822          *   The behavior of resets to individual blocks has not been tested.
10823          *
10824          * Broadcom noted the GRC reset will also reset all sub-components.
10825          */
10826         if (to_device) {
10827                 test_desc.cqid_sqid = (13 << 8) | 2;
10828
10829                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10830                 udelay(40);
10831         } else {
10832                 test_desc.cqid_sqid = (16 << 8) | 7;
10833
10834                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10835                 udelay(40);
10836         }
10837         test_desc.flags = 0x00000005;
10838
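              /* Copy the descriptor into NIC SRAM at sram_dma_descs one 32-bit
               * word at a time via the PCI config-space memory window
               * (MEM_WIN_BASE_ADDR selects the target address, MEM_WIN_DATA
               * carries the data), then park the window back at zero.
               */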
10839         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10840                 u32 val;
10841
10842                 val = *(((u32 *)&test_desc) + i);
10843                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10844                                        sram_dma_descs + (i * sizeof(u32)));
10845                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10846         }
10847         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10848
10849         if (to_device) {
10850                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10851         } else {
10852                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10853         }
10854
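              /* Poll the appropriate completion FIFO for up to ~4 ms (40 polls
               * of 100 us each), waiting for the descriptor index to show up;
               * otherwise fail with -ENODEV.
               */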
10855         ret = -ENODEV;
10856         for (i = 0; i < 40; i++) {
10857                 u32 val;
10858
10859                 if (to_device)
10860                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10861                 else
10862                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10863                 if ((val & 0xffff) == sram_dma_descs) {
10864                         ret = 0;
10865                         break;
10866                 }
10867
10868                 udelay(100);
10869         }
10870
10871         return ret;
10872 }
10873
10874 #define TEST_BUFFER_SIZE        0x2000
10875
10876 static int __devinit tg3_test_dma(struct tg3 *tp)
10877 {
10878         dma_addr_t buf_dma;
10879         u32 *buf, saved_dma_rwctrl;
10880         int ret;
10881
10882         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10883         if (!buf) {
10884                 ret = -ENOMEM;
10885                 goto out_nofree;
10886         }
10887
10888         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10889                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10890
10891         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10892
10893         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10894                 /* DMA read watermark not used on PCIE */
10895                 tp->dma_rwctrl |= 0x00180000;
10896         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10897                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10898                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10899                         tp->dma_rwctrl |= 0x003f0000;
10900                 else
10901                         tp->dma_rwctrl |= 0x003f000f;
10902         } else {
10903                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10904                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10905                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10906
10907                         /* If the 5704 is behind the EPB bridge, we can
10908                          * do the less restrictive ONE_DMA workaround for
10909                          * better performance.
10910                          */
10911                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10912                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10913                                 tp->dma_rwctrl |= 0x8000;
10914                         else if (ccval == 0x6 || ccval == 0x7)
10915                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10916
10917                         /* Set bit 23 to enable PCIX hw bug fix */
10918                         tp->dma_rwctrl |= 0x009f0000;
10919                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10920                         /* 5780 always in PCIX mode */
10921                         tp->dma_rwctrl |= 0x00144000;
10922                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10923                         /* 5714 always in PCIX mode */
10924                         tp->dma_rwctrl |= 0x00148000;
10925                 } else {
10926                         tp->dma_rwctrl |= 0x001b000f;
10927                 }
10928         }
10929
10930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10931             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10932                 tp->dma_rwctrl &= 0xfffffff0;
10933
10934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10936                 /* Remove this if it causes problems for some boards. */
10937                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10938
10939                 /* On 5700/5701 chips, we need to set this bit.
10940                  * Otherwise the chip will issue cacheline transactions
10941                  * to streamable DMA memory with not all the byte
10942                  * enables turned on.  This is an error on several
10943                  * RISC PCI controllers, in particular sparc64.
10944                  *
10945                  * On 5703/5704 chips, this bit has been reassigned
10946                  * a different meaning.  In particular, it is used
10947                  * on those chips to enable a PCI-X workaround.
10948                  */
10949                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10950         }
10951
10952         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10953
10954 #if 0
10955         /* Unneeded, already done by tg3_get_invariants.  */
10956         tg3_switch_clocks(tp);
10957 #endif
10958
10959         ret = 0;
10960         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10961             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10962                 goto out;
10963
10964         /* It is best to perform DMA test with maximum write burst size
10965          * to expose the 5700/5701 write DMA bug.
10966          */
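              /* The loop below fills the buffer with a counting pattern, DMAs
               * it to the chip and back, and verifies the result; on a
               * mismatch it falls back to a 16-byte write boundary and retries
               * before giving up.
               */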
10967         saved_dma_rwctrl = tp->dma_rwctrl;
10968         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10969         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10970
10971         while (1) {
10972                 u32 *p = buf, i;
10973
10974                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10975                         p[i] = i;
10976
10977                 /* Send the buffer to the chip. */
10978                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10979                 if (ret) {
10980                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10981                         break;
10982                 }
10983
10984 #if 0
10985                 /* validate data reached card RAM correctly. */
10986                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10987                         u32 val;
10988                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10989                         if (le32_to_cpu(val) != p[i]) {
10990                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10991                                 /* ret = -ENODEV here? */
10992                         }
10993                         p[i] = 0;
10994                 }
10995 #endif
10996                 /* Now read it back. */
10997                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10998                 if (ret) {
10999                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11000
11001                         break;
11002                 }
11003
11004                 /* Verify it. */
11005                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11006                         if (p[i] == i)
11007                                 continue;
11008
11009                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11010                             DMA_RWCTRL_WRITE_BNDRY_16) {
11011                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11012                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11013                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11014                                 break;
11015                         } else {
11016                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11017                                 ret = -ENODEV;
11018                                 goto out;
11019                         }
11020                 }
11021
11022                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11023                         /* Success. */
11024                         ret = 0;
11025                         break;
11026                 }
11027         }
11028         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11029             DMA_RWCTRL_WRITE_BNDRY_16) {
11030                 static struct pci_device_id dma_wait_state_chipsets[] = {
11031                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11032                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11033                         { },
11034                 };
11035
11036                 /* DMA test passed without adjusting DMA boundary,
11037                  * now look for chipsets that are known to expose the
11038                  * DMA bug without failing the test.
11039                  */
11040                 if (pci_dev_present(dma_wait_state_chipsets)) {
11041                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11042                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11043                 }
11044                 else
11045                         /* Safe to use the calculated DMA boundary. */
11046                         tp->dma_rwctrl = saved_dma_rwctrl;
11047
11048                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11049         }
11050
11051 out:
11052         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11053 out_nofree:
11054         return ret;
11055 }
11056
11057 static void __devinit tg3_init_link_config(struct tg3 *tp)
11058 {
11059         tp->link_config.advertising =
11060                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11061                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11062                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11063                  ADVERTISED_Autoneg | ADVERTISED_MII);
11064         tp->link_config.speed = SPEED_INVALID;
11065         tp->link_config.duplex = DUPLEX_INVALID;
11066         tp->link_config.autoneg = AUTONEG_ENABLE;
11067         tp->link_config.active_speed = SPEED_INVALID;
11068         tp->link_config.active_duplex = DUPLEX_INVALID;
11069         tp->link_config.phy_is_low_power = 0;
11070         tp->link_config.orig_speed = SPEED_INVALID;
11071         tp->link_config.orig_duplex = DUPLEX_INVALID;
11072         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11073 }
11074
11075 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11076 {
11077         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11078                 tp->bufmgr_config.mbuf_read_dma_low_water =
11079                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11080                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11081                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11082                 tp->bufmgr_config.mbuf_high_water =
11083                         DEFAULT_MB_HIGH_WATER_5705;
11084
11085                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11086                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11087                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11088                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11089                 tp->bufmgr_config.mbuf_high_water_jumbo =
11090                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11091         } else {
11092                 tp->bufmgr_config.mbuf_read_dma_low_water =
11093                         DEFAULT_MB_RDMA_LOW_WATER;
11094                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11095                         DEFAULT_MB_MACRX_LOW_WATER;
11096                 tp->bufmgr_config.mbuf_high_water =
11097                         DEFAULT_MB_HIGH_WATER;
11098
11099                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11100                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11101                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11102                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11103                 tp->bufmgr_config.mbuf_high_water_jumbo =
11104                         DEFAULT_MB_HIGH_WATER_JUMBO;
11105         }
11106
11107         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11108         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11109 }
11110
11111 static char * __devinit tg3_phy_string(struct tg3 *tp)
11112 {
11113         switch (tp->phy_id & PHY_ID_MASK) {
11114         case PHY_ID_BCM5400:    return "5400";
11115         case PHY_ID_BCM5401:    return "5401";
11116         case PHY_ID_BCM5411:    return "5411";
11117         case PHY_ID_BCM5701:    return "5701";
11118         case PHY_ID_BCM5703:    return "5703";
11119         case PHY_ID_BCM5704:    return "5704";
11120         case PHY_ID_BCM5705:    return "5705";
11121         case PHY_ID_BCM5750:    return "5750";
11122         case PHY_ID_BCM5752:    return "5752";
11123         case PHY_ID_BCM5714:    return "5714";
11124         case PHY_ID_BCM5780:    return "5780";
11125         case PHY_ID_BCM5755:    return "5755";
11126         case PHY_ID_BCM5787:    return "5787";
11127         case PHY_ID_BCM8002:    return "8002/serdes";
11128         case 0:                 return "serdes";
11129         default:                return "unknown";
11130         }
11131 }
11132
11133 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11134 {
11135         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11136                 strcpy(str, "PCI Express");
11137                 return str;
11138         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11139                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11140
11141                 strcpy(str, "PCIX:");
11142
11143                 if ((clock_ctrl == 7) ||
11144                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11145                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11146                         strcat(str, "133MHz");
11147                 else if (clock_ctrl == 0)
11148                         strcat(str, "33MHz");
11149                 else if (clock_ctrl == 2)
11150                         strcat(str, "50MHz");
11151                 else if (clock_ctrl == 4)
11152                         strcat(str, "66MHz");
11153                 else if (clock_ctrl == 6)
11154                         strcat(str, "100MHz");
11155         } else {
11156                 strcpy(str, "PCI:");
11157                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11158                         strcat(str, "66MHz");
11159                 else
11160                         strcat(str, "33MHz");
11161         }
11162         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11163                 strcat(str, ":32-bit");
11164         else
11165                 strcat(str, ":64-bit");
11166         return str;
11167 }
11168
11169 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11170 {
11171         struct pci_dev *peer;
11172         unsigned int func, devnr = tp->pdev->devfn & ~7;
11173
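              /* Mask the function bits off devfn and probe all eight functions
               * in this slot for a companion device (the other port of a
               * dual-port NIC), skipping our own pdev.
               */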
11174         for (func = 0; func < 8; func++) {
11175                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11176                 if (peer && peer != tp->pdev)
11177                         break;
11178                 pci_dev_put(peer);
11179         }
11180         /* 5704 can be configured in single-port mode, set peer to
11181          * tp->pdev in that case.
11182          */
11183         if (!peer) {
11184                 peer = tp->pdev;
11185                 return peer;
11186         }
11187
11188         /*
11189          * We don't need to keep the refcount elevated; there's no way
11190          * to remove one half of this device without removing the other
11191          */
11192         pci_dev_put(peer);
11193
11194         return peer;
11195 }
11196
11197 static void __devinit tg3_init_coal(struct tg3 *tp)
11198 {
11199         struct ethtool_coalesce *ec = &tp->coal;
11200
11201         memset(ec, 0, sizeof(*ec));
11202         ec->cmd = ETHTOOL_GCOALESCE;
11203         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11204         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11205         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11206         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11207         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11208         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11209         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11210         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11211         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11212
11213         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11214                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11215                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11216                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11217                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11218                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11219         }
11220
11221         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11222                 ec->rx_coalesce_usecs_irq = 0;
11223                 ec->tx_coalesce_usecs_irq = 0;
11224                 ec->stats_block_coalesce_usecs = 0;
11225         }
11226 }
11227
11228 static int __devinit tg3_init_one(struct pci_dev *pdev,
11229                                   const struct pci_device_id *ent)
11230 {
11231         static int tg3_version_printed = 0;
11232         unsigned long tg3reg_base, tg3reg_len;
11233         struct net_device *dev;
11234         struct tg3 *tp;
11235         int i, err, pm_cap;
11236         char str[40];
11237         u64 dma_mask, persist_dma_mask;
11238
11239         if (tg3_version_printed++ == 0)
11240                 printk(KERN_INFO "%s", version);
11241
11242         err = pci_enable_device(pdev);
11243         if (err) {
11244                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11245                        "aborting.\n");
11246                 return err;
11247         }
11248
11249         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11250                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11251                        "base address, aborting.\n");
11252                 err = -ENODEV;
11253                 goto err_out_disable_pdev;
11254         }
11255
11256         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11257         if (err) {
11258                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11259                        "aborting.\n");
11260                 goto err_out_disable_pdev;
11261         }
11262
11263         pci_set_master(pdev);
11264
11265         /* Find power-management capability. */
11266         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11267         if (pm_cap == 0) {
11268                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11269                        "aborting.\n");
11270                 err = -EIO;
11271                 goto err_out_free_res;
11272         }
11273
11274         tg3reg_base = pci_resource_start(pdev, 0);
11275         tg3reg_len = pci_resource_len(pdev, 0);
11276
11277         dev = alloc_etherdev(sizeof(*tp));
11278         if (!dev) {
11279                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11280                 err = -ENOMEM;
11281                 goto err_out_free_res;
11282         }
11283
11284         SET_MODULE_OWNER(dev);
11285         SET_NETDEV_DEV(dev, &pdev->dev);
11286
11287         dev->features |= NETIF_F_LLTX;
11288 #if TG3_VLAN_TAG_USED
11289         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11290         dev->vlan_rx_register = tg3_vlan_rx_register;
11291         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11292 #endif
11293
11294         tp = netdev_priv(dev);
11295         tp->pdev = pdev;
11296         tp->dev = dev;
11297         tp->pm_cap = pm_cap;
11298         tp->mac_mode = TG3_DEF_MAC_MODE;
11299         tp->rx_mode = TG3_DEF_RX_MODE;
11300         tp->tx_mode = TG3_DEF_TX_MODE;
11301         tp->mi_mode = MAC_MI_MODE_BASE;
11302         if (tg3_debug > 0)
11303                 tp->msg_enable = tg3_debug;
11304         else
11305                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11306
11307         /* The word/byte swap controls here control register access byte
11308          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11309          * setting below.
11310          */
11311         tp->misc_host_ctrl =
11312                 MISC_HOST_CTRL_MASK_PCI_INT |
11313                 MISC_HOST_CTRL_WORD_SWAP |
11314                 MISC_HOST_CTRL_INDIR_ACCESS |
11315                 MISC_HOST_CTRL_PCISTATE_RW;
11316
11317         /* The NONFRM (non-frame) byte/word swap controls take effect
11318          * on descriptor entries, anything which isn't packet data.
11319          *
11320          * The StrongARM chips on the board (one for tx, one for rx)
11321          * are running in big-endian mode.
11322          */
11323         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11324                         GRC_MODE_WSWAP_NONFRM_DATA);
11325 #ifdef __BIG_ENDIAN
11326         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11327 #endif
11328         spin_lock_init(&tp->lock);
11329         spin_lock_init(&tp->tx_lock);
11330         spin_lock_init(&tp->indirect_lock);
11331         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11332
11333         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11334         if (!tp->regs) {
11335                 printk(KERN_ERR PFX "Cannot map device registers, "
11336                        "aborting.\n");
11337                 err = -ENOMEM;
11338                 goto err_out_free_dev;
11339         }
11340
11341         tg3_init_link_config(tp);
11342
11343         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11344         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11345         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11346
11347         dev->open = tg3_open;
11348         dev->stop = tg3_close;
11349         dev->get_stats = tg3_get_stats;
11350         dev->set_multicast_list = tg3_set_rx_mode;
11351         dev->set_mac_address = tg3_set_mac_addr;
11352         dev->do_ioctl = tg3_ioctl;
11353         dev->tx_timeout = tg3_tx_timeout;
11354         dev->poll = tg3_poll;
11355         dev->ethtool_ops = &tg3_ethtool_ops;
11356         dev->weight = 64;
11357         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11358         dev->change_mtu = tg3_change_mtu;
11359         dev->irq = pdev->irq;
11360 #ifdef CONFIG_NET_POLL_CONTROLLER
11361         dev->poll_controller = tg3_poll_controller;
11362 #endif
11363
11364         err = tg3_get_invariants(tp);
11365         if (err) {
11366                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11367                        "aborting.\n");
11368                 goto err_out_iounmap;
11369         }
11370
11371         /* The EPB bridge inside 5714, 5715, and 5780 and any
11372          * device behind the EPB cannot support DMA addresses > 40-bit.
11373          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11374          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11375          * do DMA address check in tg3_start_xmit().
11376          */
11377         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11378                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11379         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11380                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11381 #ifdef CONFIG_HIGHMEM
11382                 dma_mask = DMA_64BIT_MASK;
11383 #endif
11384         } else
11385                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11386
11387         /* Configure DMA attributes. */
11388         if (dma_mask > DMA_32BIT_MASK) {
11389                 err = pci_set_dma_mask(pdev, dma_mask);
11390                 if (!err) {
11391                         dev->features |= NETIF_F_HIGHDMA;
11392                         err = pci_set_consistent_dma_mask(pdev,
11393                                                           persist_dma_mask);
11394                         if (err < 0) {
11395                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11396                                        "DMA for consistent allocations\n");
11397                                 goto err_out_iounmap;
11398                         }
11399                 }
11400         }
11401         if (err || dma_mask == DMA_32BIT_MASK) {
11402                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11403                 if (err) {
11404                         printk(KERN_ERR PFX "No usable DMA configuration, "
11405                                "aborting.\n");
11406                         goto err_out_iounmap;
11407                 }
11408         }
11409
11410         tg3_init_bufmgr_config(tp);
11411
11412 #if TG3_TSO_SUPPORT != 0
11413         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11414                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11415         }
11416         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11417             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11418             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11419             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11420                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11421         } else {
11422                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11423         }
11424
11425         /* TSO is on by default on chips that support hardware TSO.
11426          * Firmware TSO on older chips gives lower performance, so it
11427          * is off by default, but can be enabled using ethtool.
11428          */
11429         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11430                 dev->features |= NETIF_F_TSO;
11431
11432 #endif
11433
11434         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11435             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11436             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11437                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11438                 tp->rx_pending = 63;
11439         }
11440
11441         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11442             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11443                 tp->pdev_peer = tg3_find_peer(tp);
11444
11445         err = tg3_get_device_address(tp);
11446         if (err) {
11447                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11448                        "aborting.\n");
11449                 goto err_out_iounmap;
11450         }
11451
11452         /*
11453          * Reset chip in case UNDI or EFI driver did not shut it down.
11454          * The DMA self test will enable WDMAC and we'll see (spurious)
11455          * pending DMA on the PCI bus at that point.
11456          */
11457         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11458             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11459                 pci_save_state(tp->pdev);
11460                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11461                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11462         }
11463
11464         err = tg3_test_dma(tp);
11465         if (err) {
11466                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11467                 goto err_out_iounmap;
11468         }
11469
11470         /* Tigon3 can do ipv4 only... and some chips have buggy
11471          * checksumming.
11472          */
11473         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11474                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11475                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11476                         dev->features |= NETIF_F_HW_CSUM;
11477                 else
11478                         dev->features |= NETIF_F_IP_CSUM;
11479                 dev->features |= NETIF_F_SG;
11480                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11481         } else
11482                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11483
11484         /* flow control autonegotiation is default behavior */
11485         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11486
11487         tg3_init_coal(tp);
11488
11489         /* Now that we have fully setup the chip, save away a snapshot
11490          * of the PCI config space.  We need to restore this after
11491          * GRC_MISC_CFG core clock resets and some resume events.
11492          */
11493         pci_save_state(tp->pdev);
11494
11495         err = register_netdev(dev);
11496         if (err) {
11497                 printk(KERN_ERR PFX "Cannot register net device, "
11498                        "aborting.\n");
11499                 goto err_out_iounmap;
11500         }
11501
11502         pci_set_drvdata(pdev, dev);
11503
11504         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11505                dev->name,
11506                tp->board_part_number,
11507                tp->pci_chip_rev_id,
11508                tg3_phy_string(tp),
11509                tg3_bus_string(tp, str),
11510                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11511
11512         for (i = 0; i < 6; i++)
11513                 printk("%2.2x%c", dev->dev_addr[i],
11514                        i == 5 ? '\n' : ':');
11515
11516         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11517                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11518                "TSOcap[%d] \n",
11519                dev->name,
11520                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11521                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11522                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11523                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11524                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11525                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11526                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11527         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11528                dev->name, tp->dma_rwctrl,
11529                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11530                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11531
11532         netif_carrier_off(tp->dev);
11533
11534         return 0;
11535
11536 err_out_iounmap:
11537         if (tp->regs) {
11538                 iounmap(tp->regs);
11539                 tp->regs = NULL;
11540         }
11541
11542 err_out_free_dev:
11543         free_netdev(dev);
11544
11545 err_out_free_res:
11546         pci_release_regions(pdev);
11547
11548 err_out_disable_pdev:
11549         pci_disable_device(pdev);
11550         pci_set_drvdata(pdev, NULL);
11551         return err;
11552 }
11553
11554 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11555 {
11556         struct net_device *dev = pci_get_drvdata(pdev);
11557
11558         if (dev) {
11559                 struct tg3 *tp = netdev_priv(dev);
11560
11561                 flush_scheduled_work();
11562                 unregister_netdev(dev);
11563                 if (tp->regs) {
11564                         iounmap(tp->regs);
11565                         tp->regs = NULL;
11566                 }
11567                 free_netdev(dev);
11568                 pci_release_regions(pdev);
11569                 pci_disable_device(pdev);
11570                 pci_set_drvdata(pdev, NULL);
11571         }
11572 }
11573
11574 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11575 {
11576         struct net_device *dev = pci_get_drvdata(pdev);
11577         struct tg3 *tp = netdev_priv(dev);
11578         int err;
11579
11580         if (!netif_running(dev))
11581                 return 0;
11582
11583         flush_scheduled_work();
11584         tg3_netif_stop(tp);
11585
11586         del_timer_sync(&tp->timer);
11587
11588         tg3_full_lock(tp, 1);
11589         tg3_disable_ints(tp);
11590         tg3_full_unlock(tp);
11591
11592         netif_device_detach(dev);
11593
11594         tg3_full_lock(tp, 0);
11595         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11596         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11597         tg3_full_unlock(tp);
11598
11599         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11600         if (err) {
11601                 tg3_full_lock(tp, 0);
11602
11603                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11604                 tg3_init_hw(tp, 1);
11605
11606                 tp->timer.expires = jiffies + tp->timer_offset;
11607                 add_timer(&tp->timer);
11608
11609                 netif_device_attach(dev);
11610                 tg3_netif_start(tp);
11611
11612                 tg3_full_unlock(tp);
11613         }
11614
11615         return err;
11616 }
11617
11618 static int tg3_resume(struct pci_dev *pdev)
11619 {
11620         struct net_device *dev = pci_get_drvdata(pdev);
11621         struct tg3 *tp = netdev_priv(dev);
11622         int err;
11623
11624         if (!netif_running(dev))
11625                 return 0;
11626
11627         pci_restore_state(tp->pdev);
11628
11629         err = tg3_set_power_state(tp, PCI_D0);
11630         if (err)
11631                 return err;
11632
11633         netif_device_attach(dev);
11634
11635         tg3_full_lock(tp, 0);
11636
11637         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11638         tg3_init_hw(tp, 1);
11639
11640         tp->timer.expires = jiffies + tp->timer_offset;
11641         add_timer(&tp->timer);
11642
11643         tg3_netif_start(tp);
11644
11645         tg3_full_unlock(tp);
11646
11647         return 0;
11648 }
11649
11650 static struct pci_driver tg3_driver = {
11651         .name           = DRV_MODULE_NAME,
11652         .id_table       = tg3_pci_tbl,
11653         .probe          = tg3_init_one,
11654         .remove         = __devexit_p(tg3_remove_one),
11655         .suspend        = tg3_suspend,
11656         .resume         = tg3_resume
11657 };
11658
11659 static int __init tg3_init(void)
11660 {
11661         return pci_module_init(&tg3_driver);
11662 }
11663
11664 static void __exit tg3_cleanup(void)
11665 {
11666         pci_unregister_driver(&tg3_driver);
11667 }
11668
11669 module_init(tg3_init);
11670 module_exit(tg3_cleanup);