 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,5,6 NVIDIA Corporation
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * 0.01: 05 Oct 2003: First release that compiles without warnings.
 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *	Check all PCI BARs for the register window.
 *	udelay added to mii_rw.
 * 0.03: 06 Oct 2003: Initialize dev->irq.
 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 0.07: 14 Oct 2003: Further irq mask updates.
 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *	added into irq handler, NULL check for drain_ring.
 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *	requested interrupt sources.
 * 0.10: 20 Oct 2003: First cleanup for release.
 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *	MAC Address init fix, set_multicast cleanup.
 * 0.12: 23 Oct 2003: Cleanups for release.
 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *	Set link speed correctly. start rx before starting
 *	tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: NIC-dependent irq mask.
 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *	increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *	addresses, really stop rx if already running
 *	in nv_start_rx, clean up a bit.
 * 0.20: 07 Dec 2003: alloc fixes
 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 0.23: 26 Jan 2004: various small cleanups
 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 0.25: 09 Mar 2004: wol support
 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *	added CK804/MCP04 device IDs, code fixes
 *	for registers, link status and other minor fixes.
 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *	into nv_close, otherwise reenabling for wol can
 *	cause DMA to kfree'd memory.
 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 0.33: 16 May 2005: Support for MCP51 added.
 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 0.35: 26 Jun 2005: Support for MCP55 added.
 * 0.36: 28 Jun 2005: Add jumbo frame support.
 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 0.40: 19 Jul 2005: Add support for mac address change.
 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
 *	in the second (and later) nv_open call
 * 0.43: 10 Aug 2005: Add support for tx checksum.
 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 0.46: 20 Oct 2005: Add irq optimization modes.
 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 0.49: 10 Dec 2005: Fix tso for large buffers.
 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 * 0.55: 22 Mar 2006: Add flow control (pause frame).
 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 * 0.58: 30 Oct 2006: Added support for sideband management unit.
 * 0.59: 30 Oct 2006: Added support for recoverable error.
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
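/*
 * Worked latency bound (a note derived from the description above, not part
 * of the original comment): with the hw timer firing roughly 100 times per
 * second, a lost tx done interrupt keeps a stopped queue stalled for at most
 * about 10 ms before the timer interrupt runs tx completion and the queue
 * can be woken again.
 */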
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#define FORCEDETH_VERSION "0.59"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#define dprintk printk
#define dprintk(x...) do { } while (0)
#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI 0x0040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8000
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0040
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
		NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
		NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
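/*
 * Worked conversion (a sketch; the same formula is quoted at the
 * poll_interval module parameter below): reg = (time_in_micro_secs * 100)
 * / 2^10, so 1 ms gives (1000 * 100) / 1024 ~= 97 and one register unit is
 * about 10.24 us. NVREG_POLL_DEFAULT_THROUGHPUT (970) therefore fires
 * roughly every 10 ms, i.e. the "100 times/second" mentioned in the header
 * comment.
 */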
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10
	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK 0x00ff
#define NVREG_RNDSEED_FORCE 0x7f00
#define NVREG_RNDSEED_FORCE2 0x2d00
#define NVREG_RNDSEED_FORCE3 0x7400
	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,
	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK 0x000f
#define NVREG_MIISTAT_MASK2 0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
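/*
 * Composition sketch for the wakeup flags (an assumption based on the
 * shift/bit names above, not code taken from the driver): each D-state owns
 * a nibble selected by its shift, so accepting magic packets in D3 would be
 *
 *	writel(NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT << NVREG_WAKEUPFLAGS_D3SHIFT,
 *	       base + NvRegWakeUpFlags);
 *
 * and NVREG_WAKEUPFLAGS_ENABLE (0x1111) is that same bit replicated across
 * all four power-state nibbles.
 */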
	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxFrame = 0x2d8,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,
	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
/* Big endian: should work, but is untested */
struct ring_desc_ex {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
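/*
 * The descriptor flaglen word packs the buffer length in its low bits and
 * the status flags in its high bits. Evaluating the XORs above gives
 * LEN_MASK_V1 == 0x0000ffff (16 length bits) and LEN_MASK_V2 == 0x00003fff
 * (14 length bits, the top two freed up for additional flags).
 */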
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)
#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)
#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUMOK1 (0x10000000)
#define NV_RX2_CHECKSUMOK2 (0x14000000)
#define NV_RX2_CHECKSUMOK3 (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
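/*
 * Decoding sketch for the NV_RX2 checksum bits (an illustration, not copied
 * from the rx path): the masked flags are compared against the three OK
 * patterns, e.g.
 *
 *	if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
 *	    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2 ||
 *	    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */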
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64
#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)
#define RX_RING_DEFAULT 128
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)
/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)
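/*
 * Worked values for the jiffies intervals above (an illustration; HZ is the
 * kernel tick rate): with HZ=1000, OOM_REFILL is 1 + 1000/20 = 51 ticks,
 * about 50 ms, matching the refill comment further down; POLL_WAIT is about
 * 10 ms, LINK_TIMEOUT 3 s and STATS_INTERVAL 10 s. The "1+" guards against
 * a zero delay on configurations with a small HZ.
 */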
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_MARVELL_E3016 0x220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_INIT1 0x0f000
#define PHY_INIT2 0x0e00
#define PHY_INIT3 0x01000
#define PHY_INIT4 0x0200
#define PHY_INIT5 0x0004
#define PHY_INIT6 0x02000
#define PHY_GIGABIT 0x0100
#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2
#define PHY_HALF 0x100
#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080
#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_late_collision" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },
	/* version 2 stats */
struct nv_ethtool_stats {
	u64 tx_late_collision;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 rx_late_collision;
	u64 rx_frame_too_long;
	u64 rx_frame_align_error;
	/* version 2 stats */
#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
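/*
 * Counting note (derived from the defines above): the V2 count is the
 * number of u64 fields in struct nv_ethtool_stats, and the V1 count drops
 * the six counters that only exist in hw statistics version 2, keeping the
 * ethtool string table and the value array in lockstep with the struct.
 */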
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4
static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
struct register_test {
static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	unsigned int dma_len;
 * All hardware access under dev->priv->lock, except the performance-critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
	/* in dev: base, irq */
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	unsigned int phy_oui;
	unsigned int phy_model;
	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;
	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	unsigned long link_timeout;
	 * tx specific fields.
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;
	union ring_type tx_ring;
	struct vlan_group *vlangrp;
	/* msi/msi-x fields */
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
static int max_interrupt_work = 5;
 * Optimization can be either throughput mode or CPU mode
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
 * Poll interval for timer irq
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
static int poll_interval = -1;
static int msi = NV_MSI_INT_ENABLED;
	NV_MSIX_INT_DISABLED,
static int msix = NV_MSIX_INT_ENABLED;
	NV_DMA_64BIT_DISABLED,
static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
	return netdev_priv(dev);
static inline u8 __iomem *get_hwbase(struct net_device *dev)
	return ((struct fe_priv *)netdev_priv(dev))->base;
static inline void pci_push(u8 __iomem *base)
	/* force out pending posted writes */
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
	u8 __iomem *base = get_hwbase(dev);
	} while ((readl(base + offset) & mask) != target);
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
static void free_rings(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
				    np->rx_ring.ex, np->ring_addr);
static int using_multi_irqs(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);
	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
static void nv_enable_irq(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
			enable_irq(dev->irq);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
static void nv_disable_irq(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
			disable_irq(dev->irq);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
/* In MSIX mode, a write to irqmask behaves as XOR */
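/*
 * Consequence of the XOR behaviour (a note inferred from the two helpers
 * below): starting from an all-clear mask, writing a mask value once turns
 * those bits on, and writing the identical value again turns them back off.
 * That is why nv_disable_hw_interrupts can reuse the enable mask in the
 * MSI-X case instead of writing zero.
 */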
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
	u8 __iomem *base = get_hwbase(dev);
	writel(mask, base + NvRegIrqMask);
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
 * Caller must guarantee serialization
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
	u8 __iomem *base = get_hwbase(dev);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	writel(reg, base + NvRegMIIControl);
	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
			dev->name, miireg, addr);
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
			dev->name, value, miireg, addr);
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
			dev->name, miireg, addr);
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
			dev->name, miireg, addr, retval);
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
	struct fe_priv *np = netdev_priv(dev);
	unsigned int tries = 0;
	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
	/* wait for 500ms */
	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
static int phy_init(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);
	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
			mii_control_1000 &= ~ADVERTISE_1000FULL;
		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;
	 * (certain phys need bmcr to be setup with reset)
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
static void nv_start_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	rx_ctrl |= NVREG_RCVCTL_START;
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
static void nv_stop_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
static void nv_start_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
static void nv_stop_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
static void nv_txrx_reset(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
static void nv_mac_reset(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
static void nv_get_hw_stats(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;
	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
static struct net_device_stats *nv_get_stats(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the skb allocations failed and the
 * rx engine is left without available descriptors.
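/*
 * Fill invariant (a note on the loops below): less_rx trails get_rx by one
 * descriptor, so put_rx is never allowed to catch up with get_rx; the
 * deliberate one-slot gap keeps a completely full ring distinguishable from
 * a completely empty one.
 */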
static int nv_alloc_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;
	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;
	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
							     skb->end-skb->data, PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb->end-skb->data;
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
static int nv_alloc_rx_optimized(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;
	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;
	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
							     skb->end-skb->data, PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb->end-skb->data;
			np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
/* If rx buffers are exhausted, this is called after 50 ms to attempt a refill. */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
	struct net_device *dev = (struct net_device *) data;
	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev);
static void nv_do_rx_refill(unsigned long data)
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
			disable_irq(dev->irq);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
		retcode = nv_alloc_rx_optimized(dev);
	spin_lock_irq(&np->lock);
	if (!np->in_shutdown)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
	spin_unlock_irq(&np->lock);
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
			enable_irq(dev->irq);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
static void nv_init_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
static void nv_init_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
static int nv_init_ring(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return nv_alloc_rx(dev);
		return nv_alloc_rx_optimized(dev);
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
	struct fe_priv *np = netdev_priv(dev);
		pci_unmap_page(np->pci_dev, tx_skb->dma,
		dev_kfree_skb_any(tx_skb->skb);
static void nv_drain_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			np->stats.tx_dropped++;
static void nv_drain_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
static void drain_ring(struct net_device *dev)
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
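/*
 * Worked example for the modular arithmetic above (an illustration): with
 * tx_ring_size = 256 and put_tx_ctx 10 entries ahead of get_tx_ctx, the
 * pointer difference is 10 and (256 + 10) % 256 = 10 slots are in flight,
 * leaving 246 empty; after put wraps, the difference becomes -246 and
 * (256 - 246) % 256 = 10 again, so the wrap case needs no special handling.
 */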
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	start_tx = put_tx = np->put_tx.orig;
	/* setup the header buffer */
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		tx_flags = np->tx_flags;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;
	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
	spin_lock_irq(&np->lock);
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;
	spin_unlock_irq(&np->lock);
	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
		for (j = 0; j < 64; j++) {
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
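/*
 * Descriptor accounting sketch (an illustration of the entries computation
 * in both xmit paths): each buffer is split into NV_TX2_TSO_MAX_SIZE (16 KB)
 * chunks, so a 20000-byte linear area needs (20000 >> 14) + 1 = 2
 * descriptors, and the same per-fragment rounding is applied before the
 * total is compared against the free slots.
 */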
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	start_tx = put_tx = np->put_tx.ex;
	/* setup the header buffer */
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
		put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		tx_flags = NV_TX2_VALID;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
			put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;
	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
			start_tx->txvlan = 0;
	spin_lock_irq(&np->lock);
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;
	spin_unlock_irq(&np->lock);
	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
		for (j = 0; j < 64; j++) {
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
 * nv_tx_done: check for completed packets, release the skbs.
 * Caller must own np->lock.
1869 static void nv_tx_done(struct net_device *dev)
1871 struct fe_priv *np = netdev_priv(dev);
1873 struct ring_desc* orig_get_tx = np->get_tx.orig;
1875 while ((np->get_tx.orig != np->put_tx.orig) &&
1876 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
1878 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
1881 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1882 np->get_tx_ctx->dma_len,
1884 np->get_tx_ctx->dma = 0;
1886 if (np->desc_ver == DESC_VER_1) {
1887 if (flags & NV_TX_LASTPACKET) {
1888 if (flags & NV_TX_ERROR) {
1889 if (flags & NV_TX_UNDERFLOW)
1890 np->stats.tx_fifo_errors++;
1891 if (flags & NV_TX_CARRIERLOST)
1892 np->stats.tx_carrier_errors++;
1893 np->stats.tx_errors++;
1895 np->stats.tx_packets++;
1896 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1898 dev_kfree_skb_any(np->get_tx_ctx->skb);
1899 np->get_tx_ctx->skb = NULL;
1902 if (flags & NV_TX2_LASTPACKET) {
1903 if (flags & NV_TX2_ERROR) {
1904 if (flags & NV_TX2_UNDERFLOW)
1905 np->stats.tx_fifo_errors++;
1906 if (flags & NV_TX2_CARRIERLOST)
1907 np->stats.tx_carrier_errors++;
1908 np->stats.tx_errors++;
1910 np->stats.tx_packets++;
1911 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1913 dev_kfree_skb_any(np->get_tx_ctx->skb);
1914 np->get_tx_ctx->skb = NULL;
1917 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
1918 np->get_tx.orig = np->first_tx.orig;
1919 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1920 np->get_tx_ctx = np->first_tx_ctx;
1922 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
1924 netif_wake_queue(dev);
1928 static void nv_tx_done_optimized(struct net_device *dev, int limit)
1930 struct fe_priv *np = netdev_priv(dev);
1932 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
1934 while ((np->get_tx.ex != np->put_tx.ex) &&
1935 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
1938 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
1941 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1942 np->get_tx_ctx->dma_len,
1944 np->get_tx_ctx->dma = 0;
1946 if (flags & NV_TX2_LASTPACKET) {
1947 if (flags & NV_TX2_ERROR) {
1948 if (flags & NV_TX2_UNDERFLOW)
1949 np->stats.tx_fifo_errors++;
1950 if (flags & NV_TX2_CARRIERLOST)
1951 np->stats.tx_carrier_errors++;
1952 np->stats.tx_errors++;
1954 np->stats.tx_packets++;
1955 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1957 dev_kfree_skb_any(np->get_tx_ctx->skb);
1958 np->get_tx_ctx->skb = NULL;
1960 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
1961 np->get_tx.ex = np->first_tx.ex;
1962 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1963 np->get_tx_ctx = np->first_tx_ctx;
1965 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
1967 netif_wake_queue(dev);
1972 * nv_tx_timeout: dev->tx_timeout function
1973 * Called with netif_tx_lock held.
1975 static void nv_tx_timeout(struct net_device *dev)
1977 struct fe_priv *np = netdev_priv(dev);
1978 u8 __iomem *base = get_hwbase(dev);
1981 if (np->msi_flags & NV_MSI_X_ENABLED)
1982 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
1984 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1986 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
1991 printk(KERN_INFO "%s: Ring at %lx\n",
1992 dev->name, (unsigned long)np->ring_addr);
1993 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1994 for (i = 0; i <= np->register_size; i += 32) {
1995 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
1997 readl(base + i + 0), readl(base + i + 4),
1998 readl(base + i + 8), readl(base + i + 12),
1999 readl(base + i + 16), readl(base + i + 20),
2000 readl(base + i + 24), readl(base + i + 28));
2002 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2003 for (i = 0; i < np->tx_ring_size; i += 4) {
2004 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2005 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2007 le32_to_cpu(np->tx_ring.orig[i].buf),
2008 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2009 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2010 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2011 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2012 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2013 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2014 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2016 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2018 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2019 le32_to_cpu(np->tx_ring.ex[i].buflow),
2020 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2021 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2022 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2023 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2024 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2025 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2026 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2027 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2028 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2029 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2034 spin_lock_irq(&np->lock);
2036 /* 1) stop tx engine */
2039 /* 2) check that the packets were not sent already: */
2040 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2043 nv_tx_done_optimized(dev, np->tx_ring_size);
2045 /* 3) if there are dead entries: clear everything */
2046 if (np->get_tx_ctx != np->put_tx_ctx) {
2047 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2050 setup_hw_rings(dev, NV_SETUP_TX_RING);
2051 netif_wake_queue(dev);
2054 /* 4) restart tx engine */
2056 spin_unlock_irq(&np->lock);
2060 * Called when the nic notices a mismatch between the actual data len on the
2061 * wire and the len indicated in the 802 header
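 * Example: a proto field of 0x0800 (the IPv4 EtherType) exceeds
 * ETH_DATA_LEN (1500), so it is a type rather than a length and no
 * check is possible; a value such as 0x0100 (256) is a real 802.3 length.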
2063 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2065 int hdrlen; /* length of the 802 header */
2066 int protolen; /* length as stored in the proto field */
2068 /* 1) calculate len according to header */
2069 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2070 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2073 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2076 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2077 dev->name, datalen, protolen, hdrlen);
2078 if (protolen > ETH_DATA_LEN)
2079 return datalen; /* Value in proto field not a len, no checks possible */
2082 /* consistency checks: */
2083 if (datalen > ETH_ZLEN) {
2084 if (datalen >= protolen) {
2085 /* more data on wire than in the 802 header; trim off the extra */
2088 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2089 dev->name, protolen);
2092 /* less data on wire than mentioned in header.
2093 * Discard the packet.
2095 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2100 /* short packet. Accept only if 802 values are also short */
2101 if (protolen > ETH_ZLEN) {
2102 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2106 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2107 dev->name, datalen);
2112 static int nv_rx_process(struct net_device *dev, int limit)
2114 struct fe_priv *np = netdev_priv(dev);
2116 u32 rx_processed_cnt = 0;
2117 struct sk_buff *skb;
2120 while((np->get_rx.orig != np->put_rx.orig) &&
2121 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2122 (rx_processed_cnt++ < limit)) {
2124 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2128 * the packet is for us - immediately tear down the pci mapping.
2129 * TODO: check if a prefetch of the first cacheline improves performance.
2132 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2133 np->get_rx_ctx->dma_len,
2134 PCI_DMA_FROMDEVICE);
2135 skb = np->get_rx_ctx->skb;
2136 np->get_rx_ctx->skb = NULL;
2140 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2141 for (j=0; j<64; j++) {
2143 dprintk("\n%03x:", j);
2144 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2148 /* look at what we actually got: */
2149 if (np->desc_ver == DESC_VER_1) {
2150 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2151 len = flags & LEN_MASK_V1;
2152 if (unlikely(flags & NV_RX_ERROR)) {
2153 if (flags & NV_RX_ERROR4) {
2154 len = nv_getlen(dev, skb->data, len);
2156 np->stats.rx_errors++;
2161 /* framing errors are soft errors */
2162 else if (flags & NV_RX_FRAMINGERR) {
2163 if (flags & NV_RX_SUBSTRACT1) {
2167 /* the rest are hard errors */
2169 if (flags & NV_RX_MISSEDFRAME)
2170 np->stats.rx_missed_errors++;
2171 if (flags & NV_RX_CRCERR)
2172 np->stats.rx_crc_errors++;
2173 if (flags & NV_RX_OVERFLOW)
2174 np->stats.rx_over_errors++;
2175 np->stats.rx_errors++;
2185 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2186 len = flags & LEN_MASK_V2;
2187 if (unlikely(flags & NV_RX2_ERROR)) {
2188 if (flags & NV_RX2_ERROR4) {
2189 len = nv_getlen(dev, skb->data, len);
2191 np->stats.rx_errors++;
2196 /* framing errors are soft errors */
2197 else if (flags & NV_RX2_FRAMINGERR) {
2198 if (flags & NV_RX2_SUBSTRACT1) {
2202 /* the rest are hard errors */
2204 if (flags & NV_RX2_CRCERR)
2205 np->stats.rx_crc_errors++;
2206 if (flags & NV_RX2_OVERFLOW)
2207 np->stats.rx_over_errors++;
2208 np->stats.rx_errors++;
2213 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* ip and tcp */
2214 skb->ip_summed = CHECKSUM_UNNECESSARY;
2216 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2217 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2218 skb->ip_summed = CHECKSUM_UNNECESSARY;
2226 /* got a valid packet - forward it to the network core */
2228 skb->protocol = eth_type_trans(skb, dev);
2229 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2230 dev->name, len, skb->protocol);
2231 #ifdef CONFIG_FORCEDETH_NAPI
2232 netif_receive_skb(skb);
2236 dev->last_rx = jiffies;
2237 np->stats.rx_packets++;
2238 np->stats.rx_bytes += len;
2240 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2241 np->get_rx.orig = np->first_rx.orig;
2242 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2243 np->get_rx_ctx = np->first_rx_ctx;
2246 return rx_processed_cnt;
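/*
 * nv_rx_process_optimized: rx processing for the extended descriptor
 * format, including hardware VLAN tag extraction from the descriptor.
 */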
2249 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2251 struct fe_priv *np = netdev_priv(dev);
2254 u32 rx_processed_cnt = 0;
2255 struct sk_buff *skb;
2258 while((np->get_rx.ex != np->put_rx.ex) &&
2259 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2260 (rx_processed_cnt++ < limit)) {
2262 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2266 * the packet is for us - immediately tear down the pci mapping.
2267 * TODO: check if a prefetch of the first cacheline improves performance.
2270 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2271 np->get_rx_ctx->dma_len,
2272 PCI_DMA_FROMDEVICE);
2273 skb = np->get_rx_ctx->skb;
2274 np->get_rx_ctx->skb = NULL;
2278 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2279 for (j=0; j<64; j++) {
2281 dprintk("\n%03x:", j);
2282 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2286 /* look at what we actually got: */
2287 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2288 len = flags & LEN_MASK_V2;
2289 if (unlikely(flags & NV_RX2_ERROR)) {
2290 if (flags & NV_RX2_ERROR4) {
2291 len = nv_getlen(dev, skb->data, len);
2293 np->stats.rx_errors++;
2298 /* framing errors are soft errors */
2299 else if (flags & NV_RX2_FRAMINGERR) {
2300 if (flags & NV_RX2_SUBSTRACT1) {
2304 /* the rest are hard errors */
2306 if (flags & NV_RX2_CRCERR)
2307 np->stats.rx_crc_errors++;
2308 if (flags & NV_RX2_OVERFLOW)
2309 np->stats.rx_over_errors++;
2310 np->stats.rx_errors++;
2316 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* ip and tcp */
2317 skb->ip_summed = CHECKSUM_UNNECESSARY;
2319 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2320 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2321 skb->ip_summed = CHECKSUM_UNNECESSARY;
2325 /* got a valid packet - forward it to the network core */
2327 skb->protocol = eth_type_trans(skb, dev);
2328 prefetch(skb->data);
2330 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2331 dev->name, len, skb->protocol);
2333 if (likely(!np->vlangrp)) {
2334 #ifdef CONFIG_FORCEDETH_NAPI
2335 netif_receive_skb(skb);
2340 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2341 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2342 #ifdef CONFIG_FORCEDETH_NAPI
2343 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2344 vlanflags & NV_RX3_VLAN_TAG_MASK);
2346 vlan_hwaccel_rx(skb, np->vlangrp,
2347 vlanflags & NV_RX3_VLAN_TAG_MASK);
2350 #ifdef CONFIG_FORCEDETH_NAPI
2351 netif_receive_skb(skb);
2358 dev->last_rx = jiffies;
2359 np->stats.rx_packets++;
2360 np->stats.rx_bytes += len;
2365 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2366 np->get_rx.ex = np->first_rx.ex;
2367 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2368 np->get_rx_ctx = np->first_rx_ctx;
2371 return rx_processed_cnt;
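/* Pick the rx buffer size: a standard ethernet payload for default
 * MTUs, or the configured MTU, plus NV_RX_HEADERS of slack for the
 * link-layer header and padding.
 */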
2374 static void set_bufsize(struct net_device *dev)
2376 struct fe_priv *np = netdev_priv(dev);
2378 if (dev->mtu <= ETH_DATA_LEN)
2379 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2381 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2385 * nv_change_mtu: dev->change_mtu function
2386 * Called with dev_base_lock held for read.
2388 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2390 struct fe_priv *np = netdev_priv(dev);
2393 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2399 /* return early if the buffer sizes will not change */
2400 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2402 if (old_mtu == new_mtu)
2405 /* synchronized against open : rtnl_lock() held by caller */
2406 if (netif_running(dev)) {
2407 u8 __iomem *base = get_hwbase(dev);
2409 * It seems that the nic preloads valid ring entries into an
2410 * internal buffer. The flush procedure below is guessed; there is
2411 * probably a simpler approach, but changing the MTU is a rare
2412 * event, so it shouldn't matter.
2414 nv_disable_irq(dev);
2415 netif_tx_lock_bh(dev);
2416 spin_lock(&np->lock);
2421 /* drain rx queue */
2424 /* reinit driver view of the rx queue */
2426 if (nv_init_ring(dev)) {
2427 if (!np->in_shutdown)
2428 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2430 /* reinit nic view of the rx queue */
2431 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2432 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2433 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2434 base + NvRegRingSizes);
2436 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2439 /* restart rx engine */
2442 spin_unlock(&np->lock);
2443 netif_tx_unlock_bh(dev);
2449 static void nv_copy_mac_to_hw(struct net_device *dev)
2451 u8 __iomem *base = get_hwbase(dev);
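/* Pack the six address bytes into two registers, low byte first:
 * e.g. 00:11:22:33:44:55 yields MacAddrA = 0x33221100 and
 * MacAddrB = 0x00005544.
 */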
2454 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2455 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2456 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2458 writel(mac[0], base + NvRegMacAddrA);
2459 writel(mac[1], base + NvRegMacAddrB);
2463 * nv_set_mac_address: dev->set_mac_address function
2464 * Called with rtnl_lock() held.
2466 static int nv_set_mac_address(struct net_device *dev, void *addr)
2468 struct fe_priv *np = netdev_priv(dev);
2469 struct sockaddr *macaddr = (struct sockaddr*)addr;
2471 if (!is_valid_ether_addr(macaddr->sa_data))
2472 return -EADDRNOTAVAIL;
2474 /* synchronized against open : rtnl_lock() held by caller */
2475 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2477 if (netif_running(dev)) {
2478 netif_tx_lock_bh(dev);
2479 spin_lock_irq(&np->lock);
2481 /* stop rx engine */
2484 /* set mac address */
2485 nv_copy_mac_to_hw(dev);
2487 /* restart rx engine */
2489 spin_unlock_irq(&np->lock);
2490 netif_tx_unlock_bh(dev);
2492 nv_copy_mac_to_hw(dev);
2498 * nv_set_multicast: dev->set_multicast function
2499 * Called with netif_tx_lock held.
2501 static void nv_set_multicast(struct net_device *dev)
2503 struct fe_priv *np = netdev_priv(dev);
2504 u8 __iomem *base = get_hwbase(dev);
2507 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2509 memset(addr, 0, sizeof(addr));
2510 memset(mask, 0, sizeof(mask));
2512 if (dev->flags & IFF_PROMISC) {
2513 pff |= NVREG_PFF_PROMISC;
2515 pff |= NVREG_PFF_MYADDR;
2517 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
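/* build an exact-match filter from the bits that are identical
 * across all list entries; differing bits are masked out below */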
2521 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2522 if (dev->flags & IFF_ALLMULTI) {
2523 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2525 struct dev_mc_list *walk;
2527 walk = dev->mc_list;
2528 while (walk != NULL) {
2530 a = le32_to_cpu(*(u32 *) walk->dmi_addr);
2531 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
2539 addr[0] = alwaysOn[0];
2540 addr[1] = alwaysOn[1];
2541 mask[0] = alwaysOn[0] | alwaysOff[0];
2542 mask[1] = alwaysOn[1] | alwaysOff[1];
2545 addr[0] |= NVREG_MCASTADDRA_FORCE;
2546 pff |= NVREG_PFF_ALWAYS;
2547 spin_lock_irq(&np->lock);
2549 writel(addr[0], base + NvRegMulticastAddrA);
2550 writel(addr[1], base + NvRegMulticastAddrB);
2551 writel(mask[0], base + NvRegMulticastMaskA);
2552 writel(mask[1], base + NvRegMulticastMaskB);
2553 writel(pff, base + NvRegPacketFilterFlags);
2554 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
2557 spin_unlock_irq(&np->lock);
2560 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2562 struct fe_priv *np = netdev_priv(dev);
2563 u8 __iomem *base = get_hwbase(dev);
2565 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2567 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2568 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2569 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2570 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2571 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2573 writel(pff, base + NvRegPacketFilterFlags);
2576 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2577 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2578 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2579 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
2580 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2581 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2583 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
2584 writel(regmisc, base + NvRegMisc1);
2590 * nv_update_linkspeed: Setup the MAC according to the link partner
2591 * @dev: Network device to be configured
2593 * The function queries the PHY and checks if there is a link partner.
2594 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
2595 * set to 10 MBit HD.
2597 * The function returns 0 if there is no link partner and 1 if there is
2598 * a good link partner.
2600 static int nv_update_linkspeed(struct net_device *dev)
2602 struct fe_priv *np = netdev_priv(dev);
2603 u8 __iomem *base = get_hwbase(dev);
2606 int adv_lpa, adv_pause, lpa_pause;
2607 int newls = np->linkspeed;
2608 int newdup = np->duplex;
2611 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2613 /* BMSR_LSTATUS is latched, read it twice:
2614 * we want the current value.
2616 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2617 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2619 if (!(mii_status & BMSR_LSTATUS)) {
2620 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
2622 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2628 if (np->autoneg == 0) {
2629 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
2630 dev->name, np->fixed_mode);
2631 if (np->fixed_mode & LPA_100FULL) {
2632 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2634 } else if (np->fixed_mode & LPA_100HALF) {
2635 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2637 } else if (np->fixed_mode & LPA_10FULL) {
2638 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2641 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2647 /* check that autonegotiation is complete */
2648 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
2649 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
2650 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2653 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
2657 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2658 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2659 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2660 dev->name, adv, lpa);
2663 if (np->gigabit == PHY_GIGABIT) {
2664 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2665 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
2667 if ((control_1000 & ADVERTISE_1000FULL) &&
2668 (status_1000 & LPA_1000FULL)) {
2669 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
2671 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
2677 /* FIXME: handle parallel detection properly */
2678 adv_lpa = lpa & adv;
2679 if (adv_lpa & LPA_100FULL) {
2680 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2682 } else if (adv_lpa & LPA_100HALF) {
2683 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2685 } else if (adv_lpa & LPA_10FULL) {
2686 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2688 } else if (adv_lpa & LPA_10HALF) {
2689 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2692 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
2693 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2698 if (np->duplex == newdup && np->linkspeed == newls)
2701 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
2702 dev->name, np->linkspeed, np->duplex, newls, newdup);
2704 np->duplex = newdup;
2705 np->linkspeed = newls;
2707 if (np->gigabit == PHY_GIGABIT) {
2708 phyreg = readl(base + NvRegRandomSeed);
2709 phyreg &= ~(0x3FF00);
2710 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
2711 phyreg |= NVREG_RNDSEED_FORCE3;
2712 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
2713 phyreg |= NVREG_RNDSEED_FORCE2;
2714 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
2715 phyreg |= NVREG_RNDSEED_FORCE;
2716 writel(phyreg, base + NvRegRandomSeed);
2719 phyreg = readl(base + NvRegPhyInterface);
2720 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
2721 if (np->duplex == 0)
2723 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
2725 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2727 writel(phyreg, base + NvRegPhyInterface);
2729 if (phyreg & PHY_RGMII) {
2730 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2731 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
2733 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
2735 txreg = NVREG_TX_DEFERRAL_DEFAULT;
2737 writel(txreg, base + NvRegTxDeferral);
2739 if (np->desc_ver == DESC_VER_1) {
2740 txreg = NVREG_TX_WM_DESC1_DEFAULT;
2742 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2743 txreg = NVREG_TX_WM_DESC2_3_1000;
2745 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
2747 writel(txreg, base + NvRegTxWatermark);
2749 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
2752 writel(np->linkspeed, base + NvRegLinkSpeed);
2756 /* setup pause frame */
2757 if (np->duplex != 0) {
2758 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
2759 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
2760 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
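/* Resolve flow control as in 802.3 autoneg: both sides advertising
 * CAP gives rx pause (plus tx pause if requested); advertising only
 * ASYM enables tx pause just when the partner does CAP+ASYM; with
 * CAP+ASYM, a CAP partner gives rx pause (plus tx if requested) and
 * an ASYM-only partner gives rx pause alone.
 */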
2762 switch (adv_pause) {
2763 case ADVERTISE_PAUSE_CAP:
2764 if (lpa_pause & LPA_PAUSE_CAP) {
2765 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2766 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2767 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2770 case ADVERTISE_PAUSE_ASYM:
2771 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
2773 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2776 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
2777 if (lpa_pause & LPA_PAUSE_CAP)
2779 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2780 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2781 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2783 if (lpa_pause == LPA_PAUSE_ASYM)
2785 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2790 pause_flags = np->pause_flags;
2793 nv_update_pause(dev, pause_flags);
2798 static void nv_linkchange(struct net_device *dev)
2800 if (nv_update_linkspeed(dev)) {
2801 if (!netif_carrier_ok(dev)) {
2802 netif_carrier_on(dev);
2803 printk(KERN_INFO "%s: link up.\n", dev->name);
2807 if (netif_carrier_ok(dev)) {
2808 netif_carrier_off(dev);
2809 printk(KERN_INFO "%s: link down.\n", dev->name);
2815 static void nv_link_irq(struct net_device *dev)
2817 u8 __iomem *base = get_hwbase(dev);
2820 miistat = readl(base + NvRegMIIStatus);
2821 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
2822 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
2824 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
2826 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
2829 static irqreturn_t nv_nic_irq(int foo, void *data)
2831 struct net_device *dev = (struct net_device *) data;
2832 struct fe_priv *np = netdev_priv(dev);
2833 u8 __iomem *base = get_hwbase(dev);
2837 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
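/* service and acknowledge events in a loop; bail out after
 * max_interrupt_work iterations */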
2840 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2841 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2842 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2844 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2845 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2847 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2848 if (!(events & np->irqmask))
2851 spin_lock(&np->lock);
2853 spin_unlock(&np->lock);
2855 #ifdef CONFIG_FORCEDETH_NAPI
2856 if (events & NVREG_IRQ_RX_ALL) {
2857 netif_rx_schedule(dev);
2859 /* Disable further receive irqs */
2860 spin_lock(&np->lock);
2861 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2863 if (np->msi_flags & NV_MSI_X_ENABLED)
2864 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2866 writel(np->irqmask, base + NvRegIrqMask);
2867 spin_unlock(&np->lock);
2870 if (nv_rx_process(dev, dev->weight)) {
2871 if (unlikely(nv_alloc_rx(dev))) {
2872 spin_lock(&np->lock);
2873 if (!np->in_shutdown)
2874 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2875 spin_unlock(&np->lock);
2879 if (unlikely(events & NVREG_IRQ_LINK)) {
2880 spin_lock(&np->lock);
2882 spin_unlock(&np->lock);
2884 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2885 spin_lock(&np->lock);
2887 spin_unlock(&np->lock);
2888 np->link_timeout = jiffies + LINK_TIMEOUT;
2890 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2891 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2894 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
2895 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2898 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
2899 spin_lock(&np->lock);
2900 /* disable interrupts on the nic */
2901 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2902 writel(0, base + NvRegIrqMask);
2904 writel(np->irqmask, base + NvRegIrqMask);
2907 if (!np->in_shutdown) {
2908 np->nic_poll_irq = np->irqmask;
2909 np->recover_error = 1;
2910 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2912 spin_unlock(&np->lock);
2915 if (unlikely(i > max_interrupt_work)) {
2916 spin_lock(&np->lock);
2917 /* disable interrupts on the nic */
2918 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2919 writel(0, base + NvRegIrqMask);
2921 writel(np->irqmask, base + NvRegIrqMask);
2924 if (!np->in_shutdown) {
2925 np->nic_poll_irq = np->irqmask;
2926 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2928 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
2929 spin_unlock(&np->lock);
2934 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
2936 return IRQ_RETVAL(i);
2939 #define TX_WORK_PER_LOOP 64
2940 #define RX_WORK_PER_LOOP 64
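/* upper bounds on how much tx/rx work one handler pass may perform */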
2942 * All _optimized functions are used to help increase performance
2943 * (reduce CPU load and increase throughput). They use descriptor
2944 * version 3, compiler branch hints, and fewer memory accesses.
2946 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
2948 struct net_device *dev = (struct net_device *) data;
2949 struct fe_priv *np = netdev_priv(dev);
2950 u8 __iomem *base = get_hwbase(dev);
2954 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
2957 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2958 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2959 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2961 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2962 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2964 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2965 if (!(events & np->irqmask))
2968 spin_lock(&np->lock);
2969 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
2970 spin_unlock(&np->lock);
2972 #ifdef CONFIG_FORCEDETH_NAPI
2973 if (events & NVREG_IRQ_RX_ALL) {
2974 netif_rx_schedule(dev);
2976 /* Disable further receive irqs */
2977 spin_lock(&np->lock);
2978 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2980 if (np->msi_flags & NV_MSI_X_ENABLED)
2981 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2983 writel(np->irqmask, base + NvRegIrqMask);
2984 spin_unlock(&np->lock);
2987 if (nv_rx_process_optimized(dev, dev->weight)) {
2988 if (unlikely(nv_alloc_rx_optimized(dev))) {
2989 spin_lock(&np->lock);
2990 if (!np->in_shutdown)
2991 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2992 spin_unlock(&np->lock);
2996 if (unlikely(events & NVREG_IRQ_LINK)) {
2997 spin_lock(&np->lock);
2999 spin_unlock(&np->lock);
3001 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3002 spin_lock(&np->lock);
3004 spin_unlock(&np->lock);
3005 np->link_timeout = jiffies + LINK_TIMEOUT;
3007 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3008 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3011 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3012 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3015 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3016 spin_lock(&np->lock);
3017 /* disable interrupts on the nic */
3018 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3019 writel(0, base + NvRegIrqMask);
3021 writel(np->irqmask, base + NvRegIrqMask);
3024 if (!np->in_shutdown) {
3025 np->nic_poll_irq = np->irqmask;
3026 np->recover_error = 1;
3027 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3029 spin_unlock(&np->lock);
3033 if (unlikely(i > max_interrupt_work)) {
3034 spin_lock(&np->lock);
3035 /* disable interrupts on the nic */
3036 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3037 writel(0, base + NvRegIrqMask);
3039 writel(np->irqmask, base + NvRegIrqMask);
3042 if (!np->in_shutdown) {
3043 np->nic_poll_irq = np->irqmask;
3044 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3046 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3047 spin_unlock(&np->lock);
3052 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3054 return IRQ_RETVAL(i);
3057 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3059 struct net_device *dev = (struct net_device *) data;
3060 struct fe_priv *np = netdev_priv(dev);
3061 u8 __iomem *base = get_hwbase(dev);
3064 unsigned long flags;
3066 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3069 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3070 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3071 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3072 if (!(events & np->irqmask))
3075 spin_lock_irqsave(&np->lock, flags);
3076 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3077 spin_unlock_irqrestore(&np->lock, flags);
3079 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3080 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3083 if (unlikely(i > max_interrupt_work)) {
3084 spin_lock_irqsave(&np->lock, flags);
3085 /* disable interrupts on the nic */
3086 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3089 if (!np->in_shutdown) {
3090 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3091 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3093 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3094 spin_unlock_irqrestore(&np->lock, flags);
3099 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3101 return IRQ_RETVAL(i);
3104 #ifdef CONFIG_FORCEDETH_NAPI
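/* Old-style NAPI poll: *budget is the global packet quota and
 * dev->quota the per-device share; at most the smaller of the two
 * may be processed per call.
 */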
3105 static int nv_napi_poll(struct net_device *dev, int *budget)
3107 int pkts, limit = min(*budget, dev->quota);
3108 struct fe_priv *np = netdev_priv(dev);
3109 u8 __iomem *base = get_hwbase(dev);
3110 unsigned long flags;
3112 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
3113 pkts = nv_rx_process(dev, limit);
3115 pkts = nv_rx_process_optimized(dev, limit);
3117 if (nv_alloc_rx(dev)) {
3118 spin_lock_irqsave(&np->lock, flags);
3119 if (!np->in_shutdown)
3120 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3121 spin_unlock_irqrestore(&np->lock, flags);
3125 /* all done, no more packets present */
3126 netif_rx_complete(dev);
3128 /* re-enable receive interrupts */
3129 spin_lock_irqsave(&np->lock, flags);
3131 np->irqmask |= NVREG_IRQ_RX_ALL;
3132 if (np->msi_flags & NV_MSI_X_ENABLED)
3133 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3135 writel(np->irqmask, base + NvRegIrqMask);
3137 spin_unlock_irqrestore(&np->lock, flags);
3140 /* used up our quantum, so reschedule */
3148 #ifdef CONFIG_FORCEDETH_NAPI
3149 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3151 struct net_device *dev = (struct net_device *) data;
3152 u8 __iomem *base = get_hwbase(dev);
3155 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3156 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3159 netif_rx_schedule(dev);
3160 /* disable receive interrupts on the nic */
3161 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3167 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3169 struct net_device *dev = (struct net_device *) data;
3170 struct fe_priv *np = netdev_priv(dev);
3171 u8 __iomem *base = get_hwbase(dev);
3174 unsigned long flags;
3176 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3179 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3180 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3181 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3182 if (!(events & np->irqmask))
3185 if (nv_rx_process_optimized(dev, dev->weight)) {
3186 if (unlikely(nv_alloc_rx_optimized(dev))) {
3187 spin_lock_irqsave(&np->lock, flags);
3188 if (!np->in_shutdown)
3189 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3190 spin_unlock_irqrestore(&np->lock, flags);
3194 if (unlikely(i > max_interrupt_work)) {
3195 spin_lock_irqsave(&np->lock, flags);
3196 /* disable interrupts on the nic */
3197 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3200 if (!np->in_shutdown) {
3201 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3202 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3204 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3205 spin_unlock_irqrestore(&np->lock, flags);
3209 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3211 return IRQ_RETVAL(i);
3215 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3217 struct net_device *dev = (struct net_device *) data;
3218 struct fe_priv *np = netdev_priv(dev);
3219 u8 __iomem *base = get_hwbase(dev);
3222 unsigned long flags;
3224 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3227 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3228 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3229 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3230 if (!(events & np->irqmask))
3233 /* check tx in case we reached max loop limit in tx isr */
3234 spin_lock_irqsave(&np->lock, flags);
3235 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3236 spin_unlock_irqrestore(&np->lock, flags);
3238 if (events & NVREG_IRQ_LINK) {
3239 spin_lock_irqsave(&np->lock, flags);
3241 spin_unlock_irqrestore(&np->lock, flags);
3243 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3244 spin_lock_irqsave(&np->lock, flags);
3246 spin_unlock_irqrestore(&np->lock, flags);
3247 np->link_timeout = jiffies + LINK_TIMEOUT;
3249 if (events & NVREG_IRQ_RECOVER_ERROR) {
3250 spin_lock_irq(&np->lock);
3251 /* disable interrupts on the nic */
3252 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3255 if (!np->in_shutdown) {
3256 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3257 np->recover_error = 1;
3258 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3260 spin_unlock_irq(&np->lock);
3263 if (events & (NVREG_IRQ_UNKNOWN)) {
3264 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3267 if (unlikely(i > max_interrupt_work)) {
3268 spin_lock_irqsave(&np->lock, flags);
3269 /* disable interrupts on the nic */
3270 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3273 if (!np->in_shutdown) {
3274 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3275 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3277 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3278 spin_unlock_irqrestore(&np->lock, flags);
3283 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3285 return IRQ_RETVAL(i);
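/* Handler installed during the interrupt self-test: it acknowledges
 * the timer event and notes, under np->lock, that the interrupt
 * actually fired.
 */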
3288 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3290 struct net_device *dev = (struct net_device *) data;
3291 struct fe_priv *np = netdev_priv(dev);
3292 u8 __iomem *base = get_hwbase(dev);
3295 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3297 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3298 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3299 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3301 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3302 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3305 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3306 if (!(events & NVREG_IRQ_TIMER))
3307 return IRQ_RETVAL(0);
3309 spin_lock(&np->lock);
3311 spin_unlock(&np->lock);
3313 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3315 return IRQ_RETVAL(1);
3318 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3320 u8 __iomem *base = get_hwbase(dev);
3324 /* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
3325 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3326 * the remaining 8 interrupts.
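 * Example: mapping vector 1 onto interrupt bit 3 sets nibble 3 of
 * MSIXMap0, i.e. msixmap |= 1 << 12.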
3328 for (i = 0; i < 8; i++) {
3329 if ((irqmask >> i) & 0x1) {
3330 msixmap |= vector << (i << 2);
3333 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3336 for (i = 0; i < 8; i++) {
3337 if ((irqmask >> (i + 8)) & 0x1) {
3338 msixmap |= vector << (i << 2);
3341 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3344 static int nv_request_irq(struct net_device *dev, int intr_test)
3346 struct fe_priv *np = get_nvpriv(dev);
3347 u8 __iomem *base = get_hwbase(dev);
3350 irqreturn_t (*handler)(int foo, void *data);
3353 handler = nv_nic_irq_test;
3355 if (np->desc_ver == DESC_VER_3)
3356 handler = nv_nic_irq_optimized;
3358 handler = nv_nic_irq;
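/* interrupt setup falls back in order: MSI-X (split vectors in
 * throughput mode, a single vector otherwise), then MSI, then the
 * legacy pin */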
3361 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3362 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3363 np->msi_x_entry[i].entry = i;
3365 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3366 np->msi_flags |= NV_MSI_X_ENABLED;
3367 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3368 /* Request irq for rx handling */
3369 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3370 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3371 pci_disable_msix(np->pci_dev);
3372 np->msi_flags &= ~NV_MSI_X_ENABLED;
3375 /* Request irq for tx handling */
3376 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3377 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3378 pci_disable_msix(np->pci_dev);
3379 np->msi_flags &= ~NV_MSI_X_ENABLED;
3382 /* Request irq for link and timer handling */
3383 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3384 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3385 pci_disable_msix(np->pci_dev);
3386 np->msi_flags &= ~NV_MSI_X_ENABLED;
3389 /* map interrupts to their respective vector */
3390 writel(0, base + NvRegMSIXMap0);
3391 writel(0, base + NvRegMSIXMap1);
3392 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3393 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3394 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3396 /* Request irq for all interrupts */
3397 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3398 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3399 pci_disable_msix(np->pci_dev);
3400 np->msi_flags &= ~NV_MSI_X_ENABLED;
3404 /* map interrupts to vector 0 */
3405 writel(0, base + NvRegMSIXMap0);
3406 writel(0, base + NvRegMSIXMap1);
3410 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3411 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3412 np->msi_flags |= NV_MSI_ENABLED;
3413 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3414 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3415 pci_disable_msi(np->pci_dev);
3416 np->msi_flags &= ~NV_MSI_ENABLED;
3420 /* map interrupts to vector 0 */
3421 writel(0, base + NvRegMSIMap0);
3422 writel(0, base + NvRegMSIMap1);
3423 /* enable msi vector 0 */
3424 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3428 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3435 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3437 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3442 static void nv_free_irq(struct net_device *dev)
3444 struct fe_priv *np = get_nvpriv(dev);
3447 if (np->msi_flags & NV_MSI_X_ENABLED) {
3448 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3449 free_irq(np->msi_x_entry[i].vector, dev);
3451 pci_disable_msix(np->pci_dev);
3452 np->msi_flags &= ~NV_MSI_X_ENABLED;
3454 free_irq(np->pci_dev->irq, dev);
3455 if (np->msi_flags & NV_MSI_ENABLED) {
3456 pci_disable_msi(np->pci_dev);
3457 np->msi_flags &= ~NV_MSI_ENABLED;
3462 static void nv_do_nic_poll(unsigned long data)
3464 struct net_device *dev = (struct net_device *) data;
3465 struct fe_priv *np = netdev_priv(dev);
3466 u8 __iomem *base = get_hwbase(dev);
3470 * First disable the irq(s), then re-enable interrupts on the nic;
3471 * this has to happen before calling nv_nic_irq, because that
3472 * handler may decide to do otherwise.
3475 if (!using_multi_irqs(dev)) {
3476 if (np->msi_flags & NV_MSI_X_ENABLED)
3477 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3479 disable_irq_lockdep(dev->irq);
3482 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3483 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3484 mask |= NVREG_IRQ_RX_ALL;
3486 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3487 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3488 mask |= NVREG_IRQ_TX_ALL;
3490 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3491 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3492 mask |= NVREG_IRQ_OTHER;
3495 np->nic_poll_irq = 0;
3497 if (np->recover_error) {
3498 np->recover_error = 0;
3499 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
3500 if (netif_running(dev)) {
3501 netif_tx_lock_bh(dev);
3502 spin_lock(&np->lock);
3507 /* drain rx queue */
3510 /* reinit driver view of the rx queue */
3512 if (nv_init_ring(dev)) {
3513 if (!np->in_shutdown)
3514 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3516 /* reinit nic view of the rx queue */
3517 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3518 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3519 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3520 base + NvRegRingSizes);
3522 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3525 /* restart rx engine */
3528 spin_unlock(&np->lock);
3529 netif_tx_unlock_bh(dev);
3533 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
3535 writel(mask, base + NvRegIrqMask);
3538 if (!using_multi_irqs(dev)) {
3540 if (np->msi_flags & NV_MSI_X_ENABLED)
3541 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3543 enable_irq_lockdep(dev->irq);
3545 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3546 nv_nic_irq_rx(0, dev);
3547 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3549 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3550 nv_nic_irq_tx(0, dev);
3551 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3553 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3554 nv_nic_irq_other(0, dev);
3555 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3560 #ifdef CONFIG_NET_POLL_CONTROLLER
3561 static void nv_poll_controller(struct net_device *dev)
3563 nv_do_nic_poll((unsigned long) dev);
3567 static void nv_do_stats_poll(unsigned long data)
3569 struct net_device *dev = (struct net_device *) data;
3570 struct fe_priv *np = netdev_priv(dev);
3572 nv_get_hw_stats(dev);
3574 if (!np->in_shutdown)
3575 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
3578 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3580 struct fe_priv *np = netdev_priv(dev);
3581 strcpy(info->driver, "forcedeth");
3582 strcpy(info->version, FORCEDETH_VERSION);
3583 strcpy(info->bus_info, pci_name(np->pci_dev));
3586 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3588 struct fe_priv *np = netdev_priv(dev);
3589 wolinfo->supported = WAKE_MAGIC;
3591 spin_lock_irq(&np->lock);
3593 wolinfo->wolopts = WAKE_MAGIC;
3594 spin_unlock_irq(&np->lock);
3597 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3599 struct fe_priv *np = netdev_priv(dev);
3600 u8 __iomem *base = get_hwbase(dev);
3603 if (wolinfo->wolopts == 0) {
3605 } else if (wolinfo->wolopts & WAKE_MAGIC) {
3607 flags = NVREG_WAKEUPFLAGS_ENABLE;
3609 if (netif_running(dev)) {
3610 spin_lock_irq(&np->lock);
3611 writel(flags, base + NvRegWakeUpFlags);
3612 spin_unlock_irq(&np->lock);
3617 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3619 struct fe_priv *np = netdev_priv(dev);
3622 spin_lock_irq(&np->lock);
3623 ecmd->port = PORT_MII;
3624 if (!netif_running(dev)) {
3625 /* We do not track link speed / duplex setting if the
3626 * interface is disabled. Force a link check */
3627 if (nv_update_linkspeed(dev)) {
3628 if (!netif_carrier_ok(dev))
3629 netif_carrier_on(dev);
3631 if (netif_carrier_ok(dev))
3632 netif_carrier_off(dev);
3636 if (netif_carrier_ok(dev)) {
3637 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3638 case NVREG_LINKSPEED_10:
3639 ecmd->speed = SPEED_10;
3641 case NVREG_LINKSPEED_100:
3642 ecmd->speed = SPEED_100;
3644 case NVREG_LINKSPEED_1000:
3645 ecmd->speed = SPEED_1000;
3648 ecmd->duplex = DUPLEX_HALF;
3650 ecmd->duplex = DUPLEX_FULL;
3656 ecmd->autoneg = np->autoneg;
3658 ecmd->advertising = ADVERTISED_MII;
3660 ecmd->advertising |= ADVERTISED_Autoneg;
3661 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3662 if (adv & ADVERTISE_10HALF)
3663 ecmd->advertising |= ADVERTISED_10baseT_Half;
3664 if (adv & ADVERTISE_10FULL)
3665 ecmd->advertising |= ADVERTISED_10baseT_Full;
3666 if (adv & ADVERTISE_100HALF)
3667 ecmd->advertising |= ADVERTISED_100baseT_Half;
3668 if (adv & ADVERTISE_100FULL)
3669 ecmd->advertising |= ADVERTISED_100baseT_Full;
3670 if (np->gigabit == PHY_GIGABIT) {
3671 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3672 if (adv & ADVERTISE_1000FULL)
3673 ecmd->advertising |= ADVERTISED_1000baseT_Full;
3676 ecmd->supported = (SUPPORTED_Autoneg |
3677 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
3678 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
3680 if (np->gigabit == PHY_GIGABIT)
3681 ecmd->supported |= SUPPORTED_1000baseT_Full;
3683 ecmd->phy_address = np->phyaddr;
3684 ecmd->transceiver = XCVR_EXTERNAL;
3686 /* ignore maxtxpkt, maxrxpkt for now */
3687 spin_unlock_irq(&np->lock);
3691 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3693 struct fe_priv *np = netdev_priv(dev);
3695 if (ecmd->port != PORT_MII)
3697 if (ecmd->transceiver != XCVR_EXTERNAL)
3699 if (ecmd->phy_address != np->phyaddr) {
3700 /* TODO: support switching between multiple phys. Should be
3701 * trivial, but not enabled due to lack of test hardware. */
3704 if (ecmd->autoneg == AUTONEG_ENABLE) {
3707 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3708 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3709 if (np->gigabit == PHY_GIGABIT)
3710 mask |= ADVERTISED_1000baseT_Full;
3712 if ((ecmd->advertising & mask) == 0)
3715 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
3716 /* Note: with autonegotiation disabled, speed 1000 is intentionally
3717 * forbidden - no one should need that. */
3719 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
3721 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
3727 netif_carrier_off(dev);
3728 if (netif_running(dev)) {
3729 nv_disable_irq(dev);
3730 netif_tx_lock_bh(dev);
3731 spin_lock(&np->lock);
3735 spin_unlock(&np->lock);
3736 netif_tx_unlock_bh(dev);
3739 if (ecmd->autoneg == AUTONEG_ENABLE) {
3744 /* advertise only what has been requested */
3745 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3746 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3747 if (ecmd->advertising & ADVERTISED_10baseT_Half)
3748 adv |= ADVERTISE_10HALF;
3749 if (ecmd->advertising & ADVERTISED_10baseT_Full)
3750 adv |= ADVERTISE_10FULL;
3751 if (ecmd->advertising & ADVERTISED_100baseT_Half)
3752 adv |= ADVERTISE_100HALF;
3753 if (ecmd->advertising & ADVERTISED_100baseT_Full)
3754 adv |= ADVERTISE_100FULL;
3755 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
3756 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3757 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3758 adv |= ADVERTISE_PAUSE_ASYM;
3759 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3761 if (np->gigabit == PHY_GIGABIT) {
3762 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3763 adv &= ~ADVERTISE_1000FULL;
3764 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
3765 adv |= ADVERTISE_1000FULL;
3766 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3769 if (netif_running(dev))
3770 printk(KERN_INFO "%s: link down.\n", dev->name);
3771 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3772 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3773 bmcr |= BMCR_ANENABLE;
3774 /* reset the phy in order for settings to stick,
3775 * and cause autoneg to start */
3776 if (phy_reset(dev, bmcr)) {
3777 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3781 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3782 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3789 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3790 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3791 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
3792 adv |= ADVERTISE_10HALF;
3793 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
3794 adv |= ADVERTISE_10FULL;
3795 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
3796 adv |= ADVERTISE_100HALF;
3797 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
3798 adv |= ADVERTISE_100FULL;
3799 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
3800 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
3801 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3802 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3804 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
3805 adv |= ADVERTISE_PAUSE_ASYM;
3806 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3808 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3809 np->fixed_mode = adv;
3811 if (np->gigabit == PHY_GIGABIT) {
3812 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3813 adv &= ~ADVERTISE_1000FULL;
3814 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3817 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3818 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
3819 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
3820 bmcr |= BMCR_FULLDPLX;
3821 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
3822 bmcr |= BMCR_SPEED100;
3823 if (np->phy_oui == PHY_OUI_MARVELL) {
3824 /* reset the phy in order for forced mode settings to stick */
3825 if (phy_reset(dev, bmcr)) {
3826 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3830 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3831 if (netif_running(dev)) {
3832 /* Wait a bit and then reconfigure the nic. */
3839 if (netif_running(dev)) {
3848 #define FORCEDETH_REGS_VER 1
3850 static int nv_get_regs_len(struct net_device *dev)
3852 struct fe_priv *np = netdev_priv(dev);
3853 return np->register_size;
3856 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
3858 struct fe_priv *np = netdev_priv(dev);
3859 u8 __iomem *base = get_hwbase(dev);
3863 regs->version = FORCEDETH_REGS_VER;
3864 spin_lock_irq(&np->lock);
3865 for (i = 0; i < np->register_size/sizeof(u32); i++)
3866 rbuf[i] = readl(base + i*sizeof(u32));
3867 spin_unlock_irq(&np->lock);
3870 static int nv_nway_reset(struct net_device *dev)
3872 struct fe_priv *np = netdev_priv(dev);
3878 netif_carrier_off(dev);
3879 if (netif_running(dev)) {
3880 nv_disable_irq(dev);
3881 netif_tx_lock_bh(dev);
3882 spin_lock(&np->lock);
3886 spin_unlock(&np->lock);
3887 netif_tx_unlock_bh(dev);
3888 printk(KERN_INFO "%s: link down.\n", dev->name);
3891 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3892 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3893 bmcr |= BMCR_ANENABLE;
3894 /* reset the phy in order for settings to stick*/
3895 if (phy_reset(dev, bmcr)) {
3896 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3900 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3901 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3904 if (netif_running(dev)) {
3917 static int nv_set_tso(struct net_device *dev, u32 value)
3919 struct fe_priv *np = netdev_priv(dev);
3921 if ((np->driver_data & DEV_HAS_CHECKSUM))
3922 return ethtool_op_set_tso(dev, value);
3927 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
3929 struct fe_priv *np = netdev_priv(dev);
3931 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
3932 ring->rx_mini_max_pending = 0;
3933 ring->rx_jumbo_max_pending = 0;
3934 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
3936 ring->rx_pending = np->rx_ring_size;
3937 ring->rx_mini_pending = 0;
3938 ring->rx_jumbo_pending = 0;
3939 ring->tx_pending = np->tx_ring_size;
3942 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
3944 struct fe_priv *np = netdev_priv(dev);
3945 u8 __iomem *base = get_hwbase(dev);
3946 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
3947 dma_addr_t ring_addr;
3949 if (ring->rx_pending < RX_RING_MIN ||
3950 ring->tx_pending < TX_RING_MIN ||
3951 ring->rx_mini_pending != 0 ||
3952 ring->rx_jumbo_pending != 0 ||
3953 (np->desc_ver == DESC_VER_1 &&
3954 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
3955 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
3956 (np->desc_ver != DESC_VER_1 &&
3957 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
3958 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
3962 /* allocate new rings */
3963 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3964 rxtx_ring = pci_alloc_consistent(np->pci_dev,
3965 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
3968 rxtx_ring = pci_alloc_consistent(np->pci_dev,
3969 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3972 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
3973 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
3974 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
3975 /* fall back to old rings */
3976 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3978 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
3979 rxtx_ring, ring_addr);
3982 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3983 rxtx_ring, ring_addr);
3992 if (netif_running(dev)) {
3993 nv_disable_irq(dev);
3994 netif_tx_lock_bh(dev);
3995 spin_lock(&np->lock);
4007 /* set new values */
4008 np->rx_ring_size = ring->rx_pending;
4009 np->tx_ring_size = ring->tx_pending;
4010 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4011 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4012 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4014 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4015 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4017 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4018 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4019 np->ring_addr = ring_addr;
4021 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4022 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4024 if (netif_running(dev)) {
4025 /* reinit driver view of the queues */
4027 if (nv_init_ring(dev)) {
4028 if (!np->in_shutdown)
4029 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4032 /* reinit nic view of the queues */
4033 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4034 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4035 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4036 base + NvRegRingSizes);
4038 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4041 /* restart engines */
4044 spin_unlock(&np->lock);
4045 netif_tx_unlock_bh(dev);
4053 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4055 struct fe_priv *np = netdev_priv(dev);
4057 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4058 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4059 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4062 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4064 struct fe_priv *np = netdev_priv(dev);
4067 if ((!np->autoneg && np->duplex == 0) ||
4068 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4069 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4073 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4074 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4078 netif_carrier_off(dev);
4079 if (netif_running(dev)) {
4080 nv_disable_irq(dev);
4081 netif_tx_lock_bh(dev);
4082 spin_lock(&np->lock);
4086 spin_unlock(&np->lock);
4087 netif_tx_unlock_bh(dev);
4090 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4091 if (pause->rx_pause)
4092 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4093 if (pause->tx_pause)
4094 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4096 if (np->autoneg && pause->autoneg) {
4097 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4099 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4100 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4101 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4102 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4103 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4104 adv |= ADVERTISE_PAUSE_ASYM;
4105 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4107 if (netif_running(dev))
4108 printk(KERN_INFO "%s: link down.\n", dev->name);
4109 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4110 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4111 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4113 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4114 if (pause->rx_pause)
4115 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4116 if (pause->tx_pause)
4117 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4119 if (!netif_running(dev))
4120 nv_update_linkspeed(dev);
4122 nv_update_pause(dev, np->pause_flags);
4125 if (netif_running(dev)) {
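/*
 * Sketch of the 802.3x pause advertisement encoding applied above:
 * ADVERTISE_PAUSE_CAP is symmetric pause, ADVERTISE_PAUSE_ASYM is
 * asymmetric pause. Requesting rx pause advertises both bits, while a
 * tx-only request advertises just the asymmetric bit. The helper name
 * is illustrative only.
 */
static inline u32 nv_pause_adv_sketch(int want_rx, int want_tx)
{
	u32 adv = 0;

	if (want_rx)
		adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	if (want_tx)
		adv |= ADVERTISE_PAUSE_ASYM;
	return adv;
}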
4133 static u32 nv_get_rx_csum(struct net_device *dev)
4135 struct fe_priv *np = netdev_priv(dev);
4136 return (np->rx_csum) != 0;
4139 static int nv_set_rx_csum(struct net_device *dev, u32 data)
4141 struct fe_priv *np = netdev_priv(dev);
4142 u8 __iomem *base = get_hwbase(dev);
4145 if (np->driver_data & DEV_HAS_CHECKSUM) {
4148 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4151 /* vlan is dependent on rx checksum offload */
4152 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4153 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4155 if (netif_running(dev)) {
4156 spin_lock_irq(&np->lock);
4157 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4158 spin_unlock_irq(&np->lock);
4167 static int nv_set_tx_csum(struct net_device *dev, u32 data)
4169 struct fe_priv *np = netdev_priv(dev);
4171 if (np->driver_data & DEV_HAS_CHECKSUM)
4172 return ethtool_op_set_tx_hw_csum(dev, data);
4177 static int nv_set_sg(struct net_device *dev, u32 data)
4179 struct fe_priv *np = netdev_priv(dev);
4181 if (np->driver_data & DEV_HAS_CHECKSUM)
4182 return ethtool_op_set_sg(dev, data);
4187 static int nv_get_stats_count(struct net_device *dev)
4189 struct fe_priv *np = netdev_priv(dev);
4191 if (np->driver_data & DEV_HAS_STATISTICS_V1)
4192 return NV_DEV_STATISTICS_V1_COUNT;
4193 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4194 return NV_DEV_STATISTICS_V2_COUNT;
4199 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4201 struct fe_priv *np = netdev_priv(dev);
4204 nv_do_stats_poll((unsigned long)dev);
4206 memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
4209 static int nv_self_test_count(struct net_device *dev)
4211 struct fe_priv *np = netdev_priv(dev);
4213 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4214 return NV_TEST_COUNT_EXTENDED;
4216 return NV_TEST_COUNT_BASE;
4219 static int nv_link_test(struct net_device *dev)
4221 struct fe_priv *np = netdev_priv(dev);
4224 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); /* dummy read: the BMSR link bit is latched, read twice for current state */
4225 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4227 /* check phy link status */
4228 if (!(mii_status & BMSR_LSTATUS))
4234 static int nv_register_test(struct net_device *dev)
4236 u8 __iomem *base = get_hwbase(dev);
4238 u32 orig_read, new_read;
4241 orig_read = readl(base + nv_registers_test[i].reg);
4243 /* xor with mask to toggle bits */
4244 orig_read ^= nv_registers_test[i].mask;
4246 writel(orig_read, base + nv_registers_test[i].reg);
4248 new_read = readl(base + nv_registers_test[i].reg);
4250 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4253 /* restore original value */
4254 orig_read ^= nv_registers_test[i].mask;
4255 writel(orig_read, base + nv_registers_test[i].reg);
4257 } while (nv_registers_test[++i].reg != 0);
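/*
 * The toggle-and-readback pattern from nv_register_test() in
 * isolation: flip the writable bits with xor, check that they stuck,
 * then restore. A sketch only; reg/mask would come from
 * nv_registers_test[].
 */
static int nv_toggle_check_sketch(u8 __iomem *base, u32 reg, u32 mask)
{
	u32 orig = readl(base + reg);
	u32 back;

	writel(orig ^ mask, base + reg);	/* toggle the masked bits */
	back = readl(base + reg);
	writel(orig, base + reg);		/* always restore */

	/* pass only if every masked bit really changed */
	return ((back ^ orig) & mask) == mask;
}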
4262 static int nv_interrupt_test(struct net_device *dev)
4264 struct fe_priv *np = netdev_priv(dev);
4265 u8 __iomem *base = get_hwbase(dev);
4268 u32 save_msi_flags, save_poll_interval = 0;
4270 if (netif_running(dev)) {
4271 /* free current irq */
4273 save_poll_interval = readl(base + NvRegPollingInterval);
4276 /* flag to test interrupt handler */
4279 /* setup test irq */
4280 save_msi_flags = np->msi_flags;
4281 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4282 np->msi_flags |= 0x001; /* setup 1 vector */
4283 if (nv_request_irq(dev, 1))
4286 /* setup timer interrupt */
4287 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4288 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4290 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4292 /* wait for at least one interrupt */
4295 spin_lock_irq(&np->lock);
4297 /* flag should be set within ISR */
4298 testcnt = np->intr_test;
4302 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4303 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4304 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4306 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4308 spin_unlock_irq(&np->lock);
4312 np->msi_flags = save_msi_flags;
4314 if (netif_running(dev)) {
4315 writel(save_poll_interval, base + NvRegPollingInterval);
4316 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4317 /* restore original irq */
4318 if (nv_request_irq(dev, 0))
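/*
 * The wait step of the interrupt test as a standalone sketch: the ISR
 * sets np->intr_test when NVREG_IRQ_TIMER fires, so the test only has
 * to poll that flag. The 25 ms granularity and the loop shape here are
 * assumptions for illustration.
 */
static int nv_wait_test_irq_sketch(struct fe_priv *np, int tries)
{
	while (tries--) {
		if (np->intr_test)
			return 1;
		msleep(25);
	}
	return 0;
}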
4325 static int nv_loopback_test(struct net_device *dev)
4327 struct fe_priv *np = netdev_priv(dev);
4328 u8 __iomem *base = get_hwbase(dev);
4329 struct sk_buff *tx_skb, *rx_skb;
4330 dma_addr_t test_dma_addr;
4331 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4333 int len, i, pkt_len;
4335 u32 filter_flags = 0;
4336 u32 misc1_flags = 0;
4339 if (netif_running(dev)) {
4340 nv_disable_irq(dev);
4341 filter_flags = readl(base + NvRegPacketFilterFlags);
4342 misc1_flags = readl(base + NvRegMisc1);
4347 /* reinit driver view of the rx queue */
4351 /* setup hardware for loopback */
4352 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4353 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4355 /* reinit nic view of the rx queue */
4356 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4357 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4358 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4359 base + NvRegRingSizes);
4362 /* restart rx engine */
4366 /* setup packet for tx */
4367 pkt_len = ETH_DATA_LEN;
4368 tx_skb = dev_alloc_skb(pkt_len);
4370 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4371 " of %s\n", dev->name);
4375 pkt_data = skb_put(tx_skb, pkt_len);
4376 for (i = 0; i < pkt_len; i++)
4377 pkt_data[i] = (u8)(i & 0xff);
4378 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4379 tx_skb->end - tx_skb->data, PCI_DMA_TODEVICE); /* device reads this buffer */
4381 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4382 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4383 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4385 np->tx_ring.ex[0].bufhigh = cpu_to_le32((u64)test_dma_addr >> 32);
4386 np->tx_ring.ex[0].buflow = cpu_to_le32(test_dma_addr & 0xFFFFFFFF);
4387 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4389 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4390 pci_push(get_hwbase(dev));
4394 /* check for rx of the packet */
4395 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4396 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4397 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4400 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4401 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4404 if (flags & NV_RX_AVAIL) {
4406 } else if (np->desc_ver == DESC_VER_1) {
4407 if (flags & NV_RX_ERROR)
4410 if (flags & NV_RX2_ERROR) {
4416 if (len != pkt_len) {
4418 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4419 dev->name, len, pkt_len);
4421 rx_skb = np->rx_skb[0].skb;
4422 for (i = 0; i < pkt_len; i++) {
4423 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4425 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4432 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4435 pci_unmap_single(np->pci_dev, test_dma_addr,
4436 tx_skb->end - tx_skb->data,
4438 dev_kfree_skb_any(tx_skb);
4444 /* drain rx queue */
4448 if (netif_running(dev)) {
4449 writel(misc1_flags, base + NvRegMisc1);
4450 writel(filter_flags, base + NvRegPacketFilterFlags);
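/*
 * Sketch of the 64-bit buffer address split used for the DESC_VER_3
 * descriptors above: convert each 32-bit half separately so the
 * descriptor layout stays correct on big-endian hosts. The helper is
 * illustrative only.
 */
static inline void nv_set_buf64_sketch(__le32 *bufhigh, __le32 *buflow,
				       dma_addr_t addr)
{
	*bufhigh = cpu_to_le32((u64)addr >> 32);
	*buflow = cpu_to_le32((u32)addr);
}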
4457 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4459 struct fe_priv *np = netdev_priv(dev);
4460 u8 __iomem *base = get_hwbase(dev);
4462 memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
4464 if (!nv_link_test(dev)) {
4465 test->flags |= ETH_TEST_FL_FAILED;
4469 if (test->flags & ETH_TEST_FL_OFFLINE) {
4470 if (netif_running(dev)) {
4471 netif_stop_queue(dev);
4472 netif_poll_disable(dev);
4473 netif_tx_lock_bh(dev);
4474 spin_lock_irq(&np->lock);
4475 nv_disable_hw_interrupts(dev, np->irqmask);
4476 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
4477 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4479 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4485 /* drain rx queue */
4488 spin_unlock_irq(&np->lock);
4489 netif_tx_unlock_bh(dev);
4492 if (!nv_register_test(dev)) {
4493 test->flags |= ETH_TEST_FL_FAILED;
4497 result = nv_interrupt_test(dev);
4499 test->flags |= ETH_TEST_FL_FAILED;
4507 if (!nv_loopback_test(dev)) {
4508 test->flags |= ETH_TEST_FL_FAILED;
4512 if (netif_running(dev)) {
4513 /* reinit driver view of the rx queue */
4515 if (nv_init_ring(dev)) {
4516 if (!np->in_shutdown)
4517 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4519 /* reinit nic view of the rx queue */
4520 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4521 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4522 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4523 base + NvRegRingSizes);
4525 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4527 /* restart rx engine */
4530 netif_start_queue(dev);
4531 netif_poll_enable(dev);
4532 nv_enable_hw_interrupts(dev, np->irqmask);
4537 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4539 switch (stringset) {
4541 memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
4544 memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
4549 static const struct ethtool_ops ops = {
4550 .get_drvinfo = nv_get_drvinfo,
4551 .get_link = ethtool_op_get_link,
4552 .get_wol = nv_get_wol,
4553 .set_wol = nv_set_wol,
4554 .get_settings = nv_get_settings,
4555 .set_settings = nv_set_settings,
4556 .get_regs_len = nv_get_regs_len,
4557 .get_regs = nv_get_regs,
4558 .nway_reset = nv_nway_reset,
4559 .get_perm_addr = ethtool_op_get_perm_addr,
4560 .get_tso = ethtool_op_get_tso,
4561 .set_tso = nv_set_tso,
4562 .get_ringparam = nv_get_ringparam,
4563 .set_ringparam = nv_set_ringparam,
4564 .get_pauseparam = nv_get_pauseparam,
4565 .set_pauseparam = nv_set_pauseparam,
4566 .get_rx_csum = nv_get_rx_csum,
4567 .set_rx_csum = nv_set_rx_csum,
4568 .get_tx_csum = ethtool_op_get_tx_csum,
4569 .set_tx_csum = nv_set_tx_csum,
4570 .get_sg = ethtool_op_get_sg,
4571 .set_sg = nv_set_sg,
4572 .get_strings = nv_get_strings,
4573 .get_stats_count = nv_get_stats_count,
4574 .get_ethtool_stats = nv_get_ethtool_stats,
4575 .self_test_count = nv_self_test_count,
4576 .self_test = nv_self_test,
4579 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
4581 struct fe_priv *np = get_nvpriv(dev);
4583 spin_lock_irq(&np->lock);
4585 /* save vlan group */
4589 /* enable vlan on MAC */
4590 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
4592 /* disable vlan on MAC */
4593 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4594 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4597 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4599 spin_unlock_irq(&np->lock);
4602 static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
4607 /* The mgmt unit and driver use a semaphore to access the phy during init */
4608 static int nv_mgmt_acquire_sema(struct net_device *dev)
4610 u8 __iomem *base = get_hwbase(dev);
4612 u32 tx_ctrl, mgmt_sema;
4614 for (i = 0; i < 10; i++) {
4615 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
4616 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
4621 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
4624 for (i = 0; i < 2; i++) {
4625 tx_ctrl = readl(base + NvRegTransmitterControl);
4626 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
4627 writel(tx_ctrl, base + NvRegTransmitterControl);
4629 /* verify that semaphore was acquired */
4630 tx_ctrl = readl(base + NvRegTransmitterControl);
4631 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
4632 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
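/* a successful acquire must show both that our host bits latched and
 * that the mgmt unit is still free; presumably either check alone
 * could race with the management firmware */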
4641 static int nv_open(struct net_device *dev)
4643 struct fe_priv *np = netdev_priv(dev);
4644 u8 __iomem *base = get_hwbase(dev);
4648 dprintk(KERN_DEBUG "nv_open: begin\n");
4650 /* erase previous misconfiguration */
4651 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4653 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
4654 writel(0, base + NvRegMulticastAddrB);
4655 writel(0, base + NvRegMulticastMaskA);
4656 writel(0, base + NvRegMulticastMaskB);
4657 writel(0, base + NvRegPacketFilterFlags);
4659 writel(0, base + NvRegTransmitterControl);
4660 writel(0, base + NvRegReceiverControl);
4662 writel(0, base + NvRegAdapterControl);
4664 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
4665 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
4667 /* initialize descriptor rings */
4669 oom = nv_init_ring(dev);
4671 writel(0, base + NvRegLinkSpeed);
4672 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
4674 writel(0, base + NvRegUnknownSetupReg6);
4676 np->in_shutdown = 0;
4679 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4680 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4681 base + NvRegRingSizes);
4683 writel(np->linkspeed, base + NvRegLinkSpeed);
4684 if (np->desc_ver == DESC_VER_1)
4685 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
4687 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
4688 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4689 writel(np->vlanctl_bits, base + NvRegVlanControl);
4691 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
4692 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
4693 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
4694 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
4696 writel(0, base + NvRegMIIMask);
4697 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4698 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
4700 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
4701 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
4702 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
4703 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4705 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
4706 get_random_bytes(&i, sizeof(i));
4707 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
4708 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
4709 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
4710 if (poll_interval == -1) {
4711 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
4712 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
4714 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4717 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
4718 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4719 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
4720 base + NvRegAdapterControl);
4721 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
4722 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
4724 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
4726 i = readl(base + NvRegPowerState);
4727 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
4728 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
4732 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
4734 nv_disable_hw_interrupts(dev, np->irqmask);
4736 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
4737 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4740 if (nv_request_irq(dev, 0)) {
4744 /* ask for interrupts */
4745 nv_enable_hw_interrupts(dev, np->irqmask);
4747 spin_lock_irq(&np->lock);
4748 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
4749 writel(0, base + NvRegMulticastAddrB);
4750 writel(0, base + NvRegMulticastMaskA);
4751 writel(0, base + NvRegMulticastMaskB);
4752 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
4753 /* One manual link speed update: Interrupts are enabled, future link
4754 * speed changes cause interrupts and are handled by nv_link_irq().
4758 miistat = readl(base + NvRegMIIStatus);
4759 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
4760 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
4762 /* set linkspeed to invalid value, thus force nv_update_linkspeed
4765 ret = nv_update_linkspeed(dev);
4768 netif_start_queue(dev);
4769 netif_poll_enable(dev);
4772 netif_carrier_on(dev);
4774 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
4775 netif_carrier_off(dev);
4778 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4780 /* start statistics timer */
4781 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
4782 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
4784 spin_unlock_irq(&np->lock);
4792 static int nv_close(struct net_device *dev)
4794 struct fe_priv *np = netdev_priv(dev);
4797 spin_lock_irq(&np->lock);
4798 np->in_shutdown = 1;
4799 spin_unlock_irq(&np->lock);
4800 netif_poll_disable(dev);
4801 synchronize_irq(dev->irq);
4803 del_timer_sync(&np->oom_kick);
4804 del_timer_sync(&np->nic_poll);
4805 del_timer_sync(&np->stats_poll);
4807 netif_stop_queue(dev);
4808 spin_lock_irq(&np->lock);
4813 /* disable interrupts on the nic or we will lock up */
4814 base = get_hwbase(dev);
4815 nv_disable_hw_interrupts(dev, np->irqmask);
4817 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
4819 spin_unlock_irq(&np->lock);
4828 /* FIXME: power down nic */
4833 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
4835 struct net_device *dev;
4840 u32 powerstate, txreg;
4841 u32 phystate_orig = 0, phystate;
4842 int phyinitialized = 0;
4844 dev = alloc_etherdev(sizeof(struct fe_priv));
4849 np = netdev_priv(dev);
4850 np->pci_dev = pci_dev;
4851 spin_lock_init(&np->lock);
4852 SET_MODULE_OWNER(dev);
4853 SET_NETDEV_DEV(dev, &pci_dev->dev);
4855 init_timer(&np->oom_kick);
4856 np->oom_kick.data = (unsigned long) dev;
4857 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
4858 init_timer(&np->nic_poll);
4859 np->nic_poll.data = (unsigned long) dev;
4860 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
4861 init_timer(&np->stats_poll);
4862 np->stats_poll.data = (unsigned long) dev;
4863 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
4865 err = pci_enable_device(pci_dev);
4867 printk(KERN_INFO "forcedeth: pci_enable_device failed (%d) for device %s\n",
4868 err, pci_name(pci_dev));
4872 pci_set_master(pci_dev);
4874 err = pci_request_regions(pci_dev, DRV_NAME);
4878 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
4879 np->register_size = NV_PCI_REGSZ_VER3;
4880 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
4881 np->register_size = NV_PCI_REGSZ_VER2;
4883 np->register_size = NV_PCI_REGSZ_VER1;
4887 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
4888 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
4889 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
4890 pci_resource_len(pci_dev, i),
4891 pci_resource_flags(pci_dev, i));
4892 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
4893 pci_resource_len(pci_dev, i) >= np->register_size) {
4894 addr = pci_resource_start(pci_dev, i);
4898 if (i == DEVICE_COUNT_RESOURCE) {
4899 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
4904 /* copy of driver data */
4905 np->driver_data = id->driver_data;
4907 /* handle different descriptor versions */
4908 if (id->driver_data & DEV_HAS_HIGH_DMA) {
4909 /* packet format 3: supports 40-bit addressing */
4910 np->desc_ver = DESC_VER_3;
4911 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
4913 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
4914 printk(KERN_INFO "forcedeth: 39-bit DMA mask failed, using 32-bit addressing for device %s.\n",
4917 dev->features |= NETIF_F_HIGHDMA;
4918 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
4920 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
4921 printk(KERN_INFO "forcedeth: 39-bit consistent DMA mask failed, using 32-bit ring buffers for device %s.\n",
4925 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
4926 /* packet format 2: supports jumbo frames */
4927 np->desc_ver = DESC_VER_2;
4928 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
4930 /* original packet format */
4931 np->desc_ver = DESC_VER_1;
4932 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
4935 np->pkt_limit = NV_PKTLIMIT_1;
4936 if (id->driver_data & DEV_HAS_LARGEDESC)
4937 np->pkt_limit = NV_PKTLIMIT_2;
4939 if (id->driver_data & DEV_HAS_CHECKSUM) {
4941 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4942 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4943 dev->features |= NETIF_F_TSO;
4946 np->vlanctl_bits = 0;
4947 if (id->driver_data & DEV_HAS_VLAN) {
4948 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
4949 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
4950 dev->vlan_rx_register = nv_vlan_rx_register;
4951 dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
4955 if ((id->driver_data & DEV_HAS_MSI) && msi) {
4956 np->msi_flags |= NV_MSI_CAPABLE;
4958 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
4959 np->msi_flags |= NV_MSI_X_CAPABLE;
4962 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
4963 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
4964 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
4969 np->base = ioremap(addr, np->register_size);
4972 dev->base_addr = (unsigned long)np->base;
4974 dev->irq = pci_dev->irq;
4976 np->rx_ring_size = RX_RING_DEFAULT;
4977 np->tx_ring_size = TX_RING_DEFAULT;
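/* rx and tx descriptors share a single DMA-coherent allocation below;
 * the tx ring starts right after the last rx descriptor */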
4979 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4980 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
4981 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
4983 if (!np->rx_ring.orig)
4985 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4987 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
4988 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
4990 if (!np->rx_ring.ex)
4992 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4994 np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
4995 np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
4996 if (!np->rx_skb || !np->tx_skb)
4998 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4999 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
5001 dev->open = nv_open;
5002 dev->stop = nv_close;
5003 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5004 dev->hard_start_xmit = nv_start_xmit;
5006 dev->hard_start_xmit = nv_start_xmit_optimized;
5007 dev->get_stats = nv_get_stats;
5008 dev->change_mtu = nv_change_mtu;
5009 dev->set_mac_address = nv_set_mac_address;
5010 dev->set_multicast_list = nv_set_multicast;
5011 #ifdef CONFIG_NET_POLL_CONTROLLER
5012 dev->poll_controller = nv_poll_controller;
5014 dev->weight = RX_WORK_PER_LOOP;
5015 #ifdef CONFIG_FORCEDETH_NAPI
5016 dev->poll = nv_napi_poll;
5018 SET_ETHTOOL_OPS(dev, &ops);
5019 dev->tx_timeout = nv_tx_timeout;
5020 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5022 pci_set_drvdata(pci_dev, dev);
5024 /* read the mac address */
5025 base = get_hwbase(dev);
5026 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5027 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5029 /* check the workaround bit for correct mac address order */
5030 txreg = readl(base + NvRegTransmitPoll);
5031 if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5032 /* mac address is already in correct order */
5033 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5034 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5035 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5036 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5037 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5038 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5040 /* need to reverse mac address to correct order */
5041 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5042 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5043 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5044 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5045 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5046 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5047 /* set the permanent address to be correct as well */
5048 np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
5049 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
5050 np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
5051 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5053 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5055 if (!is_valid_ether_addr(dev->perm_addr)) {
5057 * Bad mac address. At least one bios sets the mac address
5058 * to 01:23:45:67:89:ab
5060 printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
5062 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
5063 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
5064 printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
5065 dev->dev_addr[0] = 0x00;
5066 dev->dev_addr[1] = 0x00;
5067 dev->dev_addr[2] = 0x6c;
5068 get_random_bytes(&dev->dev_addr[3], 3);
5071 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
5072 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
5073 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
5075 /* set mac address */
5076 nv_copy_mac_to_hw(dev);
5079 writel(0, base + NvRegWakeUpFlags);
5082 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5084 pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
5086 /* take phy and nic out of low power mode */
5087 powerstate = readl(base + NvRegPowerState2);
5088 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5089 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5090 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5091 revision_id >= 0xA3)
5092 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5093 writel(powerstate, base + NvRegPowerState2);
5096 if (np->desc_ver == DESC_VER_1) {
5097 np->tx_flags = NV_TX_VALID;
5099 np->tx_flags = NV_TX2_VALID;
5101 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
5102 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5103 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5104 np->msi_flags |= 0x0003;
5106 np->irqmask = NVREG_IRQMASK_CPU;
5107 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5108 np->msi_flags |= 0x0001;
5111 if (id->driver_data & DEV_NEED_TIMERIRQ)
5112 np->irqmask |= NVREG_IRQ_TIMER;
5113 if (id->driver_data & DEV_NEED_LINKTIMER) {
5114 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5115 np->need_linktimer = 1;
5116 np->link_timeout = jiffies + LINK_TIMEOUT;
5118 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5119 np->need_linktimer = 0;
5122 /* clear phy state and temporarily halt phy interrupts */
5123 writel(0, base + NvRegMIIMask);
5124 phystate = readl(base + NvRegAdapterControl);
5125 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5127 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5128 writel(phystate, base + NvRegAdapterControl);
5130 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
5132 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5133 /* management unit running on the mac? */
5134 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
5135 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
5136 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
5137 for (i = 0; i < 5000; i++) {
5139 if (nv_mgmt_acquire_sema(dev)) {
5140 /* has the management unit already set up the phy? */
5141 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5142 NVREG_XMITCTL_SYNC_PHY_INIT) {
5143 /* phy was initialized by the mgmt unit */
5145 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
5147 /* we need to init the phy */
5155 /* find a suitable phy */
5156 for (i = 1; i <= 32; i++) {
5158 int phyaddr = i & 0x1F;
5160 spin_lock_irq(&np->lock);
5161 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5162 spin_unlock_irq(&np->lock);
5163 if (id1 < 0 || id1 == 0xffff)
5165 spin_lock_irq(&np->lock);
5166 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5167 spin_unlock_irq(&np->lock);
5168 if (id2 < 0 || id2 == 0xffff)
5171 np->phy_model = id2 & PHYID2_MODEL_MASK;
5172 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5173 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5174 dprintk(KERN_DEBUG "%s: probe: Found PHY %04x:%04x at address %d.\n",
5175 pci_name(pci_dev), id1, id2, phyaddr);
5176 np->phyaddr = phyaddr;
5177 np->phy_oui = id1 | id2;
5181 printk(KERN_INFO "%s: probe: Could not find a valid PHY.\n",
5186 if (!phyinitialized) {
5190 /* see if it is a gigabit phy */
5191 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5192 if (mii_status & PHY_GIGABIT) {
5193 np->gigabit = PHY_GIGABIT;
5197 /* set default link speed settings */
5198 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5202 err = register_netdev(dev);
5204 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
5207 printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
5208 dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
5215 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5216 pci_set_drvdata(pci_dev, NULL);
5220 iounmap(get_hwbase(dev));
5222 pci_release_regions(pci_dev);
5224 pci_disable_device(pci_dev);
5231 static void __devexit nv_remove(struct pci_dev *pci_dev)
5233 struct net_device *dev = pci_get_drvdata(pci_dev);
5234 struct fe_priv *np = netdev_priv(dev);
5235 u8 __iomem *base = get_hwbase(dev);
5237 unregister_netdev(dev);
5239 /* special op: write back the misordered MAC address - otherwise
5240 * the next nv_probe would see a wrong address.
5242 writel(np->orig_mac[0], base + NvRegMacAddrA);
5243 writel(np->orig_mac[1], base + NvRegMacAddrB);
5245 /* free all structures */
5247 iounmap(get_hwbase(dev));
5248 pci_release_regions(pci_dev);
5249 pci_disable_device(pci_dev);
5251 pci_set_drvdata(pci_dev, NULL);
5255 static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5257 struct net_device *dev = pci_get_drvdata(pdev);
5258 struct fe_priv *np = netdev_priv(dev);
5260 if (!netif_running(dev))
5263 netif_device_detach(dev);
5268 pci_save_state(pdev);
5269 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5270 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5275 static int nv_resume(struct pci_dev *pdev)
5277 struct net_device *dev = pci_get_drvdata(pdev);
5280 if (!netif_running(dev))
5283 netif_device_attach(dev);
5285 pci_set_power_state(pdev, PCI_D0);
5286 pci_restore_state(pdev);
5287 pci_enable_wake(pdev, PCI_D0, 0);
5294 #define nv_suspend NULL
5295 #define nv_resume NULL
5296 #endif /* CONFIG_PM */
5298 static struct pci_device_id pci_tbl[] = {
5299 { /* nForce Ethernet Controller */
5300 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
5301 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5303 { /* nForce2 Ethernet Controller */
5304 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
5305 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5307 { /* nForce3 Ethernet Controller */
5308 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
5309 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5311 { /* nForce3 Ethernet Controller */
5312 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
5313 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5315 { /* nForce3 Ethernet Controller */
5316 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
5317 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5319 { /* nForce3 Ethernet Controller */
5320 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
5321 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5323 { /* nForce3 Ethernet Controller */
5324 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
5325 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5327 { /* CK804 Ethernet Controller */
5328 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
5329 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5331 { /* CK804 Ethernet Controller */
5332 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
5333 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5335 { /* MCP04 Ethernet Controller */
5336 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
5337 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5339 { /* MCP04 Ethernet Controller */
5340 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
5341 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5343 { /* MCP51 Ethernet Controller */
5344 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
5345 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
5347 { /* MCP51 Ethernet Controller */
5348 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
5349 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
5351 { /* MCP55 Ethernet Controller */
5352 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
5353 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5355 { /* MCP55 Ethernet Controller */
5356 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
5357 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5359 { /* MCP61 Ethernet Controller */
5360 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
5361 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5363 { /* MCP61 Ethernet Controller */
5364 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
5365 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5367 { /* MCP61 Ethernet Controller */
5368 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
5369 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5371 { /* MCP61 Ethernet Controller */
5372 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
5373 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5375 { /* MCP65 Ethernet Controller */
5376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
5377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5379 { /* MCP65 Ethernet Controller */
5380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
5381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5383 { /* MCP65 Ethernet Controller */
5384 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
5385 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5387 { /* MCP65 Ethernet Controller */
5388 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
5389 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5391 { /* MCP67 Ethernet Controller */
5392 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
5393 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5395 { /* MCP67 Ethernet Controller */
5396 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
5397 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5399 { /* MCP67 Ethernet Controller */
5400 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
5401 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5403 { /* MCP67 Ethernet Controller */
5404 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
5405 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5410 static struct pci_driver driver = {
5411 .name = "forcedeth",
5412 .id_table = pci_tbl,
5414 .remove = __devexit_p(nv_remove),
5415 .suspend = nv_suspend,
5416 .resume = nv_resume,
5419 static int __init init_nic(void)
5421 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
5422 return pci_register_driver(&driver);
5425 static void __exit exit_nic(void)
5427 pci_unregister_driver(&driver);
5430 module_param(max_interrupt_work, int, 0);
5431 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
5432 module_param(optimization_mode, int, 0);
5433 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
5434 module_param(poll_interval, int, 0);
5435 MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated: register value = (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
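/* worked example for the formula above: a ~1 ms timer period would be
 * poll_interval = (1000 * 100) / 1024, i.e. about 98; each count is
 * therefore roughly 10.24 microseconds */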
5436 module_param(msi, int, 0);
5437 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
5438 module_param(msix, int, 0);
5439 MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
5440 module_param(dma_64bit, int, 0);
5441 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
5443 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
5444 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
5445 MODULE_LICENSE("GPL");
5447 MODULE_DEVICE_TABLE(pci, pci_tbl);
5449 module_init(init_nic);
5450 module_exit(exit_nic);