2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
4 * Note: This driver is a cleanroom reimplementation based on reverse
5 * engineered documentation written by Carl-Daniel Hailfinger
6 * and Andrew de Quincey.
8 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
9 * trademarks of NVIDIA Corporation in the United States and other
12 * Copyright (C) 2003,4,5 Manfred Spraul
13 * Copyright (C) 2004 Andrew de Quincey (wol support)
14 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
15 * IRQ rate fixes, bigendian fixes, cleanups, verification)
16 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 * We suspect that on some hardware no TX done interrupts are generated.
34 * This means recovery from netif_stop_queue only happens if the hw timer
35 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if an rx packet arrives by chance.
37 * If your hardware reliably generates tx done interrupts, then you can remove
38 * DEV_NEED_TIMERIRQ from the driver_data flags.
39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40 * superfluous timer interrupts from the nic.
42 #define FORCEDETH_VERSION "0.61"
43 #define DRV_NAME "forcedeth"
45 #include <linux/module.h>
46 #include <linux/types.h>
47 #include <linux/pci.h>
48 #include <linux/interrupt.h>
49 #include <linux/netdevice.h>
50 #include <linux/etherdevice.h>
51 #include <linux/delay.h>
52 #include <linux/spinlock.h>
53 #include <linux/ethtool.h>
54 #include <linux/timer.h>
55 #include <linux/skbuff.h>
56 #include <linux/mii.h>
57 #include <linux/random.h>
58 #include <linux/init.h>
59 #include <linux/if_vlan.h>
60 #include <linux/dma-mapping.h>
64 #include <asm/uaccess.h>
65 #include <asm/system.h>
#if 0	/* change to 1 to enable verbose debug output */
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
73 #define TX_WORK_PER_LOOP 64
74 #define RX_WORK_PER_LOOP 64
80 #define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */
81 #define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */
82 #define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */
83 #define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */
84 #define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and stripping */
86 #define DEV_HAS_MSI 0x00040 /* device supports MSI */
87 #define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */
88 #define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */
89 #define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */
90 #define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */
91 #define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */
92 #define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */
93 #define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */
94 #define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */
95 #define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */
96 #define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */
97 #define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */
98 #define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */
99 #define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */
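/*
 * Illustrative sketch only (a hypothetical entry, not copied from the
 * real PCI table): a board advertises a combination of the flags above
 * through its pci_device_id .driver_data, e.g.
 *	.driver_data = DEV_NEED_TIMERIRQ | DEV_NEED_LINKTIMER | DEV_HAS_LARGEDESC,
 * and the driver then tests single capabilities with expressions such as
 * "np->driver_data & DEV_HAS_STATISTICS_V2" (see nv_get_hw_stats below).
 */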
102 NvRegIrqStatus = 0x000,
103 #define NVREG_IRQSTAT_MIIEVENT 0x040
104 #define NVREG_IRQSTAT_MASK 0x81ff
105 NvRegIrqMask = 0x004,
106 #define NVREG_IRQ_RX_ERROR 0x0001
107 #define NVREG_IRQ_RX 0x0002
108 #define NVREG_IRQ_RX_NOBUF 0x0004
109 #define NVREG_IRQ_TX_ERR 0x0008
110 #define NVREG_IRQ_TX_OK 0x0010
111 #define NVREG_IRQ_TIMER 0x0020
112 #define NVREG_IRQ_LINK 0x0040
113 #define NVREG_IRQ_RX_FORCED 0x0080
114 #define NVREG_IRQ_TX_FORCED 0x0100
115 #define NVREG_IRQ_RECOVER_ERROR 0x8000
116 #define NVREG_IRQMASK_THROUGHPUT 0x00df
117 #define NVREG_IRQMASK_CPU 0x0060
118 #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
119 #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
120 #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
122 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
123 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
124 NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
126 NvRegUnknownSetupReg6 = 0x008,
127 #define NVREG_UNKSETUP6_VAL 3
130 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
131 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
133 NvRegPollingInterval = 0x00c,
134 #define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
135 #define NVREG_POLL_DEFAULT_CPU 13
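/*
 * Worked example (a sketch, inverting the "value = time_in_micro_secs *
 * 100 / 2^10" relation given for the poll_interval module parameter
 * below, i.e. time_in_micro_secs = value * 1024 / 100):
 *	NVREG_POLL_DEFAULT_THROUGHPUT = 970 -> ~9933 usec (~10 ms backup poll)
 *	NVREG_POLL_DEFAULT_CPU        =  13 -> ~133 usec between timer irqs
 */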
136 NvRegMSIMap0 = 0x020,
137 NvRegMSIMap1 = 0x024,
138 NvRegMSIIrqMask = 0x030,
139 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
141 #define NVREG_MISC1_PAUSE_TX 0x01
142 #define NVREG_MISC1_HD 0x02
143 #define NVREG_MISC1_FORCE 0x3b0f3c
145 NvRegMacReset = 0x34,
146 #define NVREG_MAC_RESET_ASSERT 0x0F3
147 NvRegTransmitterControl = 0x084,
148 #define NVREG_XMITCTL_START 0x01
149 #define NVREG_XMITCTL_MGMT_ST 0x40000000
150 #define NVREG_XMITCTL_SYNC_MASK 0x000f0000
151 #define NVREG_XMITCTL_SYNC_NOT_READY 0x0
152 #define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
153 #define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
154 #define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
155 #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
156 #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
157 #define NVREG_XMITCTL_HOST_LOADED 0x00004000
158 #define NVREG_XMITCTL_TX_PATH_EN 0x01000000
159 NvRegTransmitterStatus = 0x088,
160 #define NVREG_XMITSTAT_BUSY 0x01
162 NvRegPacketFilterFlags = 0x8c,
163 #define NVREG_PFF_PAUSE_RX 0x08
164 #define NVREG_PFF_ALWAYS 0x7F0000
165 #define NVREG_PFF_PROMISC 0x80
166 #define NVREG_PFF_MYADDR 0x20
167 #define NVREG_PFF_LOOPBACK 0x10
169 NvRegOffloadConfig = 0x90,
170 #define NVREG_OFFLOAD_HOMEPHY 0x601
171 #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
172 NvRegReceiverControl = 0x094,
173 #define NVREG_RCVCTL_START 0x01
174 #define NVREG_RCVCTL_RX_PATH_EN 0x01000000
175 NvRegReceiverStatus = 0x98,
176 #define NVREG_RCVSTAT_BUSY 0x01
178 NvRegSlotTime = 0x9c,
179 #define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
180 #define NVREG_SLOTTIME_10_100_FULL 0x00007f00
181 #define NVREG_SLOTTIME_1000_FULL 0x0003ff00
182 #define NVREG_SLOTTIME_HALF 0x0000ff00
183 #define NVREG_SLOTTIME_DEFAULT 0x00007f00
184 #define NVREG_SLOTTIME_MASK 0x000000ff
186 NvRegTxDeferral = 0xA0,
187 #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
188 #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
189 #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
190 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
191 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
192 #define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
193 NvRegRxDeferral = 0xA4,
194 #define NVREG_RX_DEFERRAL_DEFAULT 0x16
195 NvRegMacAddrA = 0xA8,
196 NvRegMacAddrB = 0xAC,
197 NvRegMulticastAddrA = 0xB0,
198 #define NVREG_MCASTADDRA_FORCE 0x01
199 NvRegMulticastAddrB = 0xB4,
200 NvRegMulticastMaskA = 0xB8,
201 #define NVREG_MCASTMASKA_NONE 0xffffffff
202 NvRegMulticastMaskB = 0xBC,
203 #define NVREG_MCASTMASKB_NONE 0xffff
205 NvRegPhyInterface = 0xC0,
206 #define PHY_RGMII 0x10000000
207 NvRegBackOffControl = 0xC4,
208 #define NVREG_BKOFFCTRL_DEFAULT 0x70000000
209 #define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
210 #define NVREG_BKOFFCTRL_SELECT 24
211 #define NVREG_BKOFFCTRL_GEAR 12
213 NvRegTxRingPhysAddr = 0x100,
214 NvRegRxRingPhysAddr = 0x104,
215 NvRegRingSizes = 0x108,
216 #define NVREG_RINGSZ_TXSHIFT 0
217 #define NVREG_RINGSZ_RXSHIFT 16
218 NvRegTransmitPoll = 0x10c,
219 #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
220 NvRegLinkSpeed = 0x110,
221 #define NVREG_LINKSPEED_FORCE 0x10000
222 #define NVREG_LINKSPEED_10 1000
223 #define NVREG_LINKSPEED_100 100
224 #define NVREG_LINKSPEED_1000 50
225 #define NVREG_LINKSPEED_MASK (0xFFF)
226 NvRegUnknownSetupReg5 = 0x130,
227 #define NVREG_UNKSETUP5_BIT31 (1<<31)
228 NvRegTxWatermark = 0x13c,
229 #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
230 #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
231 #define NVREG_TX_WM_DESC2_3_1000 0xfe08000
232 NvRegTxRxControl = 0x144,
233 #define NVREG_TXRXCTL_KICK 0x0001
234 #define NVREG_TXRXCTL_BIT1 0x0002
235 #define NVREG_TXRXCTL_BIT2 0x0004
236 #define NVREG_TXRXCTL_IDLE 0x0008
237 #define NVREG_TXRXCTL_RESET 0x0010
238 #define NVREG_TXRXCTL_RXCHECK 0x0400
239 #define NVREG_TXRXCTL_DESC_1 0
240 #define NVREG_TXRXCTL_DESC_2 0x002100
241 #define NVREG_TXRXCTL_DESC_3 0xc02200
242 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
243 #define NVREG_TXRXCTL_VLANINS 0x00080
244 NvRegTxRingPhysAddrHigh = 0x148,
245 NvRegRxRingPhysAddrHigh = 0x14C,
246 NvRegTxPauseFrame = 0x170,
247 #define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
248 #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
249 #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
250 #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
251 NvRegMIIStatus = 0x180,
252 #define NVREG_MIISTAT_ERROR 0x0001
253 #define NVREG_MIISTAT_LINKCHANGE 0x0008
254 #define NVREG_MIISTAT_MASK_RW 0x0007
255 #define NVREG_MIISTAT_MASK_ALL 0x000f
256 NvRegMIIMask = 0x184,
257 #define NVREG_MII_LINKCHANGE 0x0008
259 NvRegAdapterControl = 0x188,
260 #define NVREG_ADAPTCTL_START 0x02
261 #define NVREG_ADAPTCTL_LINKUP 0x04
262 #define NVREG_ADAPTCTL_PHYVALID 0x40000
263 #define NVREG_ADAPTCTL_RUNNING 0x100000
264 #define NVREG_ADAPTCTL_PHYSHIFT 24
265 NvRegMIISpeed = 0x18c,
266 #define NVREG_MIISPEED_BIT8 (1<<8)
267 #define NVREG_MIIDELAY 5
268 NvRegMIIControl = 0x190,
269 #define NVREG_MIICTL_INUSE 0x08000
270 #define NVREG_MIICTL_WRITE 0x00400
271 #define NVREG_MIICTL_ADDRSHIFT 5
272 NvRegMIIData = 0x194,
273 NvRegWakeUpFlags = 0x200,
274 #define NVREG_WAKEUPFLAGS_VAL 0x7770
275 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
276 #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
277 #define NVREG_WAKEUPFLAGS_D3SHIFT 12
278 #define NVREG_WAKEUPFLAGS_D2SHIFT 8
279 #define NVREG_WAKEUPFLAGS_D1SHIFT 4
280 #define NVREG_WAKEUPFLAGS_D0SHIFT 0
281 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
282 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
283 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
284 #define NVREG_WAKEUPFLAGS_ENABLE 0x1111
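/*
 * Layout sketch (derived from the shifts above): the wakeup flags
 * register holds one 4-bit accept mask per power state (D0..D3 at
 * shifts 0, 4, 8 and 12), so NVREG_WAKEUPFLAGS_ENABLE = 0x1111 sets
 * ACCEPT_MAGPAT in each of the four nibbles.
 */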
286 NvRegPatternCRC = 0x204,
287 NvRegPatternMask = 0x208,
288 NvRegPowerCap = 0x268,
289 #define NVREG_POWERCAP_D3SUPP (1<<30)
290 #define NVREG_POWERCAP_D2SUPP (1<<26)
291 #define NVREG_POWERCAP_D1SUPP (1<<25)
292 NvRegPowerState = 0x26c,
293 #define NVREG_POWERSTATE_POWEREDUP 0x8000
294 #define NVREG_POWERSTATE_VALID 0x0100
295 #define NVREG_POWERSTATE_MASK 0x0003
296 #define NVREG_POWERSTATE_D0 0x0000
297 #define NVREG_POWERSTATE_D1 0x0001
298 #define NVREG_POWERSTATE_D2 0x0002
299 #define NVREG_POWERSTATE_D3 0x0003
301 NvRegTxZeroReXmt = 0x284,
302 NvRegTxOneReXmt = 0x288,
303 NvRegTxManyReXmt = 0x28c,
304 NvRegTxLateCol = 0x290,
305 NvRegTxUnderflow = 0x294,
306 NvRegTxLossCarrier = 0x298,
307 NvRegTxExcessDef = 0x29c,
308 NvRegTxRetryErr = 0x2a0,
309 NvRegRxFrameErr = 0x2a4,
310 NvRegRxExtraByte = 0x2a8,
311 NvRegRxLateCol = 0x2ac,
313 NvRegRxFrameTooLong = 0x2b4,
314 NvRegRxOverflow = 0x2b8,
315 NvRegRxFCSErr = 0x2bc,
316 NvRegRxFrameAlignErr = 0x2c0,
317 NvRegRxLenErr = 0x2c4,
318 NvRegRxUnicast = 0x2c8,
319 NvRegRxMulticast = 0x2cc,
320 NvRegRxBroadcast = 0x2d0,
322 NvRegTxFrame = 0x2d8,
324 NvRegTxPause = 0x2e0,
325 NvRegRxPause = 0x2e4,
326 NvRegRxDropFrame = 0x2e8,
327 NvRegVlanControl = 0x300,
328 #define NVREG_VLANCONTROL_ENABLE 0x2000
329 NvRegMSIXMap0 = 0x3e0,
330 NvRegMSIXMap1 = 0x3e4,
331 NvRegMSIXIrqStatus = 0x3f0,
333 NvRegPowerState2 = 0x600,
334 #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
335 #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
338 /* Big endian: should work, but is untested */
struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};
356 #define FLAG_MASK_V1 0xffff0000
357 #define FLAG_MASK_V2 0xffffc000
358 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
359 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
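/*
 * The descriptor's flaglen word packs the buffer length into the low
 * bits and the flag bits into the high bits, so the XORs above work out
 * to LEN_MASK_V1 = 0x0000ffff and LEN_MASK_V2 = 0x00003fff.
 */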
361 #define NV_TX_LASTPACKET (1<<16)
362 #define NV_TX_RETRYERROR (1<<19)
363 #define NV_TX_RETRYCOUNT_MASK (0xF<<20)
364 #define NV_TX_FORCED_INTERRUPT (1<<24)
365 #define NV_TX_DEFERRED (1<<26)
366 #define NV_TX_CARRIERLOST (1<<27)
367 #define NV_TX_LATECOLLISION (1<<28)
368 #define NV_TX_UNDERFLOW (1<<29)
369 #define NV_TX_ERROR (1<<30)
370 #define NV_TX_VALID (1<<31)
372 #define NV_TX2_LASTPACKET (1<<29)
373 #define NV_TX2_RETRYERROR (1<<18)
374 #define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
375 #define NV_TX2_FORCED_INTERRUPT (1<<30)
376 #define NV_TX2_DEFERRED (1<<25)
377 #define NV_TX2_CARRIERLOST (1<<26)
378 #define NV_TX2_LATECOLLISION (1<<27)
379 #define NV_TX2_UNDERFLOW (1<<28)
380 /* error and valid are the same for both */
381 #define NV_TX2_ERROR (1<<30)
382 #define NV_TX2_VALID (1<<31)
383 #define NV_TX2_TSO (1<<28)
384 #define NV_TX2_TSO_SHIFT 14
385 #define NV_TX2_TSO_MAX_SHIFT 14
386 #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
387 #define NV_TX2_CHECKSUM_L3 (1<<27)
388 #define NV_TX2_CHECKSUM_L4 (1<<26)
390 #define NV_TX3_VLAN_TAG_PRESENT (1<<18)
392 #define NV_RX_DESCRIPTORVALID (1<<16)
393 #define NV_RX_MISSEDFRAME (1<<17)
394 #define NV_RX_SUBSTRACT1 (1<<18)
395 #define NV_RX_ERROR1 (1<<23)
396 #define NV_RX_ERROR2 (1<<24)
397 #define NV_RX_ERROR3 (1<<25)
398 #define NV_RX_ERROR4 (1<<26)
399 #define NV_RX_CRCERR (1<<27)
400 #define NV_RX_OVERFLOW (1<<28)
401 #define NV_RX_FRAMINGERR (1<<29)
402 #define NV_RX_ERROR (1<<30)
403 #define NV_RX_AVAIL (1<<31)
405 #define NV_RX2_CHECKSUMMASK (0x1C000000)
406 #define NV_RX2_CHECKSUM_IP (0x10000000)
407 #define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
408 #define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
409 #define NV_RX2_DESCRIPTORVALID (1<<29)
410 #define NV_RX2_SUBSTRACT1 (1<<25)
411 #define NV_RX2_ERROR1 (1<<18)
412 #define NV_RX2_ERROR2 (1<<19)
413 #define NV_RX2_ERROR3 (1<<20)
414 #define NV_RX2_ERROR4 (1<<21)
415 #define NV_RX2_CRCERR (1<<22)
416 #define NV_RX2_OVERFLOW (1<<23)
417 #define NV_RX2_FRAMINGERR (1<<24)
418 /* error and avail are the same for both */
419 #define NV_RX2_ERROR (1<<30)
420 #define NV_RX2_AVAIL (1<<31)
422 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
423 #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscellaneous hardware related defines: */
426 #define NV_PCI_REGSZ_VER1 0x270
427 #define NV_PCI_REGSZ_VER2 0x2d4
428 #define NV_PCI_REGSZ_VER3 0x604
430 /* various timeout delays: all in usec */
431 #define NV_TXRX_RESET_DELAY 4
432 #define NV_TXSTOP_DELAY1 10
433 #define NV_TXSTOP_DELAY1MAX 500000
434 #define NV_TXSTOP_DELAY2 100
435 #define NV_RXSTOP_DELAY1 10
436 #define NV_RXSTOP_DELAY1MAX 500000
437 #define NV_RXSTOP_DELAY2 100
438 #define NV_SETUP5_DELAY 5
439 #define NV_SETUP5_DELAYMAX 50000
440 #define NV_POWERUP_DELAY 5
441 #define NV_POWERUP_DELAYMAX 5000
442 #define NV_MIIBUSY_DELAY 50
443 #define NV_MIIPHY_DELAY 10
444 #define NV_MIIPHY_DELAYMAX 10000
445 #define NV_MAC_RESET_DELAY 64
447 #define NV_WAKEUPPATTERNS 5
448 #define NV_WAKEUPMASKENTRIES 4
450 /* General driver defaults */
451 #define NV_WATCHDOG_TIMEO (5*HZ)
453 #define RX_RING_DEFAULT 128
454 #define TX_RING_DEFAULT 256
455 #define RX_RING_MIN 128
456 #define TX_RING_MIN 64
457 #define RING_MAX_DESC_VER_1 1024
458 #define RING_MAX_DESC_VER_2_3 16384
/* rx/tx mac addr + type + vlan + align + slack */
461 #define NV_RX_HEADERS (64)
462 /* even more slack. */
463 #define NV_RX_ALLOC_PAD (64)
465 /* maximum mtu size */
466 #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVIDIA: 9202 */
469 #define OOM_REFILL (1+HZ/20)
470 #define POLL_WAIT (1+HZ/100)
471 #define LINK_TIMEOUT (3*HZ)
472 #define STATS_INTERVAL (10*HZ)
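/*
 * Worked example (a sketch, assuming HZ = 250): OOM_REFILL = 1 + 250/20
 * = 13 jiffies (~52 ms) and POLL_WAIT = 1 + 250/100 = 3 jiffies (~12 ms);
 * the "+1" keeps the delay non-zero for small HZ values.
 */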
476 * The nic supports three different descriptor types:
 * - DESC_VER_1: original format
 * - DESC_VER_2: adds support for jumbo frames
 * - DESC_VER_3: 64-bit format
486 #define PHY_OUI_MARVELL 0x5043
487 #define PHY_OUI_CICADA 0x03f1
488 #define PHY_OUI_VITESSE 0x01c1
489 #define PHY_OUI_REALTEK 0x0732
490 #define PHYID1_OUI_MASK 0x03ff
491 #define PHYID1_OUI_SHFT 6
492 #define PHYID2_OUI_MASK 0xfc00
493 #define PHYID2_OUI_SHFT 10
494 #define PHYID2_MODEL_MASK 0x03f0
495 #define PHY_MODEL_MARVELL_E3016 0x220
496 #define PHY_MARVELL_E3016_INITMASK 0x0300
497 #define PHY_CICADA_INIT1 0x0f000
498 #define PHY_CICADA_INIT2 0x0e00
499 #define PHY_CICADA_INIT3 0x01000
500 #define PHY_CICADA_INIT4 0x0200
501 #define PHY_CICADA_INIT5 0x0004
502 #define PHY_CICADA_INIT6 0x02000
503 #define PHY_VITESSE_INIT_REG1 0x1f
504 #define PHY_VITESSE_INIT_REG2 0x10
505 #define PHY_VITESSE_INIT_REG3 0x11
506 #define PHY_VITESSE_INIT_REG4 0x12
507 #define PHY_VITESSE_INIT_MSK1 0xc
508 #define PHY_VITESSE_INIT_MSK2 0x0180
509 #define PHY_VITESSE_INIT1 0x52b5
510 #define PHY_VITESSE_INIT2 0xaf8a
511 #define PHY_VITESSE_INIT3 0x8
512 #define PHY_VITESSE_INIT4 0x8f8a
513 #define PHY_VITESSE_INIT5 0xaf86
514 #define PHY_VITESSE_INIT6 0x8f86
515 #define PHY_VITESSE_INIT7 0xaf82
516 #define PHY_VITESSE_INIT8 0x0100
517 #define PHY_VITESSE_INIT9 0x8f82
518 #define PHY_VITESSE_INIT10 0x0
519 #define PHY_REALTEK_INIT_REG1 0x1f
520 #define PHY_REALTEK_INIT_REG2 0x19
521 #define PHY_REALTEK_INIT_REG3 0x13
522 #define PHY_REALTEK_INIT1 0x0000
523 #define PHY_REALTEK_INIT2 0x8e00
524 #define PHY_REALTEK_INIT3 0x0001
525 #define PHY_REALTEK_INIT4 0xad17
527 #define PHY_GIGABIT 0x0100
529 #define PHY_TIMEOUT 0x1
530 #define PHY_ERROR 0x2
534 #define PHY_HALF 0x100
536 #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
537 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
538 #define NV_PAUSEFRAME_RX_ENABLE 0x0004
539 #define NV_PAUSEFRAME_TX_ENABLE 0x0008
540 #define NV_PAUSEFRAME_RX_REQ 0x0010
541 #define NV_PAUSEFRAME_TX_REQ 0x0020
542 #define NV_PAUSEFRAME_AUTONEG 0x0040
544 /* MSI/MSI-X defines */
545 #define NV_MSI_X_MAX_VECTORS 8
546 #define NV_MSI_X_VECTORS_MASK 0x000f
547 #define NV_MSI_CAPABLE 0x0010
548 #define NV_MSI_X_CAPABLE 0x0020
549 #define NV_MSI_ENABLED 0x0040
550 #define NV_MSI_X_ENABLED 0x0080
552 #define NV_MSI_X_VECTOR_ALL 0x0
553 #define NV_MSI_X_VECTOR_RX 0x0
554 #define NV_MSI_X_VECTOR_TX 0x1
555 #define NV_MSI_X_VECTOR_OTHER 0x2
557 #define NV_RESTART_TX 0x1
558 #define NV_RESTART_RX 0x2
560 #define NV_TX_LIMIT_COUNT 16
563 struct nv_ethtool_str {
564 char name[ETH_GSTRING_LEN];
567 static const struct nv_ethtool_str nv_estats_str[] = {
572 { "tx_late_collision" },
573 { "tx_fifo_errors" },
574 { "tx_carrier_errors" },
575 { "tx_excess_deferral" },
576 { "tx_retry_error" },
577 { "rx_frame_error" },
579 { "rx_late_collision" },
581 { "rx_frame_too_long" },
582 { "rx_over_errors" },
584 { "rx_frame_align_error" },
585 { "rx_length_error" },
590 { "rx_errors_total" },
591 { "tx_errors_total" },
593 /* version 2 stats */
602 struct nv_ethtool_stats {
607 u64 tx_late_collision;
609 u64 tx_carrier_errors;
610 u64 tx_excess_deferral;
614 u64 rx_late_collision;
616 u64 rx_frame_too_long;
619 u64 rx_frame_align_error;
628 /* version 2 stats */
637 #define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
638 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
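/*
 * The six version-2-only counters subtracted above are tx_deferral,
 * tx_packets, rx_bytes, tx_pause, rx_pause and rx_drop_frame; see the
 * DEV_HAS_STATISTICS_V2 block in nv_get_hw_stats() below.
 */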
641 #define NV_TEST_COUNT_BASE 3
642 #define NV_TEST_COUNT_EXTENDED 4
644 static const struct nv_ethtool_str nv_etests_str[] = {
645 { "link (online/offline)" },
646 { "register (offline) " },
647 { "interrupt (offline) " },
648 { "loopback (offline) " }
651 struct register_test {
656 static const struct register_test nv_registers_test[] = {
657 { NvRegUnknownSetupReg6, 0x01 },
658 { NvRegMisc1, 0x03c },
659 { NvRegOffloadConfig, 0x03ff },
660 { NvRegMulticastAddrA, 0xffffffff },
661 { NvRegTxWatermark, 0x0ff },
662 { NvRegWakeUpFlags, 0x07777 },
669 unsigned int dma_len;
670 struct ring_desc_ex *first_tx_desc;
671 struct nv_skb_map *next_tx_ctx;
 * All hardware access under dev->priv->lock, except the performance-critical parts:
678 * - rx is (pseudo-) lockless: it relies on the single-threading provided
679 * by the arch code for interrupts.
680 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
681 * needs dev->priv->lock :-(
682 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
685 /* in dev: base, irq */
689 struct net_device *dev;
690 struct napi_struct napi;
/* General data:
 * Locking: spin_lock(&np->lock); */
694 struct nv_ethtool_stats estats;
702 unsigned int phy_oui;
703 unsigned int phy_model;
708 /* General data: RO fields */
709 dma_addr_t ring_addr;
710 struct pci_dev *pci_dev;
723 /* rx specific fields.
 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
726 union ring_type get_rx, put_rx, first_rx, last_rx;
727 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
728 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
729 struct nv_skb_map *rx_skb;
731 union ring_type rx_ring;
732 unsigned int rx_buf_sz;
733 unsigned int pkt_limit;
734 struct timer_list oom_kick;
735 struct timer_list nic_poll;
736 struct timer_list stats_poll;
740 /* media detection workaround.
 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
744 unsigned long link_timeout;
746 * tx specific fields.
748 union ring_type get_tx, put_tx, first_tx, last_tx;
749 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
750 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
751 struct nv_skb_map *tx_skb;
753 union ring_type tx_ring;
757 u32 tx_pkts_in_progress;
758 struct nv_skb_map *tx_change_owner;
759 struct nv_skb_map *tx_end_flip;
763 struct vlan_group *vlangrp;
765 /* msi/msi-x fields */
767 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
774 * Maximum number of loops until we assume that a bit in the irq mask
775 * is stuck. Overridable with module param.
777 static int max_interrupt_work = 5;
 * Optimization can be either throughput mode or CPU mode
782 * Throughput Mode: Every tx and rx packet will generate an interrupt.
783 * CPU Mode: Interrupts are controlled by a timer.
786 NV_OPTIMIZATION_MODE_THROUGHPUT,
787 NV_OPTIMIZATION_MODE_CPU
789 static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
792 * Poll interval for timer irq
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
798 static int poll_interval = -1;
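/*
 * Worked example (a sketch, assuming the formula above): a 1 ms interval
 * needs poll_interval = (1000 * 100) / 1024 = ~97, which matches the
 * "NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms"
 * note earlier in this file.
 */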
807 static int msi = NV_MSI_INT_ENABLED;
813 NV_MSIX_INT_DISABLED,
816 static int msix = NV_MSIX_INT_DISABLED;
822 NV_DMA_64BIT_DISABLED,
825 static int dma_64bit = NV_DMA_64BIT_ENABLED;
827 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
829 return netdev_priv(dev);
832 static inline u8 __iomem *get_hwbase(struct net_device *dev)
834 return ((struct fe_priv *)netdev_priv(dev))->base;
837 static inline void pci_push(u8 __iomem *base)
839 /* force out pending posted writes */
843 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
845 return le32_to_cpu(prd->flaglen)
846 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
849 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
851 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
861 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
862 int delay, int delaymax, const char *msg)
864 u8 __iomem *base = get_hwbase(dev);
875 } while ((readl(base + offset) & mask) != target);
879 #define NV_SETUP_RX_RING 0x01
880 #define NV_SETUP_TX_RING 0x02
882 static inline u32 dma_low(dma_addr_t addr)
887 static inline u32 dma_high(dma_addr_t addr)
889 return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */
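/*
 * Note on the double shift above: a single "addr >> 32" would be
 * undefined behaviour when dma_addr_t is a 32-bit type (shift count
 * equal to the type width), while ">>31>>1" is well defined and simply
 * yields 0 in that case.
 */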
892 static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
894 struct fe_priv *np = get_nvpriv(dev);
895 u8 __iomem *base = get_hwbase(dev);
897 if (!nv_optimized(np)) {
898 if (rxtx_flags & NV_SETUP_RX_RING) {
899 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
901 if (rxtx_flags & NV_SETUP_TX_RING) {
902 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
905 if (rxtx_flags & NV_SETUP_RX_RING) {
906 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
907 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
909 if (rxtx_flags & NV_SETUP_TX_RING) {
910 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
911 writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
916 static void free_rings(struct net_device *dev)
918 struct fe_priv *np = get_nvpriv(dev);
920 if (!nv_optimized(np)) {
921 if (np->rx_ring.orig)
922 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
923 np->rx_ring.orig, np->ring_addr);
926 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
927 np->rx_ring.ex, np->ring_addr);
935 static int using_multi_irqs(struct net_device *dev)
937 struct fe_priv *np = get_nvpriv(dev);
939 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
940 ((np->msi_flags & NV_MSI_X_ENABLED) &&
941 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
947 static void nv_enable_irq(struct net_device *dev)
949 struct fe_priv *np = get_nvpriv(dev);
951 if (!using_multi_irqs(dev)) {
952 if (np->msi_flags & NV_MSI_X_ENABLED)
953 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
955 enable_irq(np->pci_dev->irq);
957 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
958 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
959 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
963 static void nv_disable_irq(struct net_device *dev)
965 struct fe_priv *np = get_nvpriv(dev);
967 if (!using_multi_irqs(dev)) {
968 if (np->msi_flags & NV_MSI_X_ENABLED)
969 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
971 disable_irq(np->pci_dev->irq);
973 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
974 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
975 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
979 /* In MSIX mode, a write to irqmask behaves as XOR */
980 static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
982 u8 __iomem *base = get_hwbase(dev);
984 writel(mask, base + NvRegIrqMask);
987 static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
989 struct fe_priv *np = get_nvpriv(dev);
990 u8 __iomem *base = get_hwbase(dev);
992 if (np->msi_flags & NV_MSI_X_ENABLED) {
993 writel(mask, base + NvRegIrqMask);
995 if (np->msi_flags & NV_MSI_ENABLED)
996 writel(0, base + NvRegMSIIrqMask);
997 writel(0, base + NvRegIrqMask);
1001 #define MII_READ (-1)
1002 /* mii_rw: read/write a register on the PHY.
1004 * Caller must guarantee serialization
1006 static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1008 u8 __iomem *base = get_hwbase(dev);
1012 writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
1014 reg = readl(base + NvRegMIIControl);
1015 if (reg & NVREG_MIICTL_INUSE) {
1016 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1017 udelay(NV_MIIBUSY_DELAY);
1020 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1021 if (value != MII_READ) {
1022 writel(value, base + NvRegMIIData);
1023 reg |= NVREG_MIICTL_WRITE;
1025 writel(reg, base + NvRegMIIControl);
1027 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1028 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
1029 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1030 dev->name, miireg, addr);
1032 } else if (value != MII_READ) {
1033 /* it was a write operation - fewer failures are detectable */
1034 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1035 dev->name, value, miireg, addr);
1037 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1038 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1039 dev->name, miireg, addr);
1042 retval = readl(base + NvRegMIIData);
1043 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1044 dev->name, miireg, addr, retval);
1050 static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1052 struct fe_priv *np = netdev_priv(dev);
1054 unsigned int tries = 0;
1056 miicontrol = BMCR_RESET | bmcr_setup;
1057 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
1061 /* wait for 500ms */
1064 /* must wait till reset is deasserted */
1065 while (miicontrol & BMCR_RESET) {
1067 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1068 /* FIXME: 100 tries seem excessive */
1075 static int phy_init(struct net_device *dev)
1077 struct fe_priv *np = get_nvpriv(dev);
1078 u8 __iomem *base = get_hwbase(dev);
u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
1081 /* phy errata for E3016 phy */
1082 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1083 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1084 reg &= ~PHY_MARVELL_E3016_INITMASK;
1085 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1086 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
1090 if (np->phy_oui == PHY_OUI_REALTEK) {
1091 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1092 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1095 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1096 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1099 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1100 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1103 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1104 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1107 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1108 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1113 /* set advertise register */
1114 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1115 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
1116 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1117 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
1121 /* get phy interface type */
1122 phyinterface = readl(base + NvRegPhyInterface);
1124 /* see if gigabit phy */
1125 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1126 if (mii_status & PHY_GIGABIT) {
1127 np->gigabit = PHY_GIGABIT;
1128 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1129 mii_control_1000 &= ~ADVERTISE_1000HALF;
1130 if (phyinterface & PHY_RGMII)
1131 mii_control_1000 |= ADVERTISE_1000FULL;
1133 mii_control_1000 &= ~ADVERTISE_1000FULL;
1135 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1136 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1143 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1144 mii_control |= BMCR_ANENABLE;
 * (certain phys need bmcr to be set up with reset)
1149 if (phy_reset(dev, mii_control)) {
1150 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1154 /* phy vendor specific configuration */
1155 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
1156 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1157 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1158 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1159 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1160 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1163 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1164 phy_reserved |= PHY_CICADA_INIT5;
1165 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1166 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1170 if (np->phy_oui == PHY_OUI_CICADA) {
1171 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1172 phy_reserved |= PHY_CICADA_INIT6;
1173 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1174 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1178 if (np->phy_oui == PHY_OUI_VITESSE) {
1179 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1180 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1183 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1184 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1187 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1188 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1189 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1192 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1193 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1194 phy_reserved |= PHY_VITESSE_INIT3;
1195 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1196 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1199 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1200 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1203 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1204 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1207 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1208 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1209 phy_reserved |= PHY_VITESSE_INIT3;
1210 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1211 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1214 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1215 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1216 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1219 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
1220 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1223 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
1224 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1227 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1228 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1229 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1232 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1233 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1234 phy_reserved |= PHY_VITESSE_INIT8;
1235 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1236 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1239 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1240 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1243 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1244 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1248 if (np->phy_oui == PHY_OUI_REALTEK) {
1249 /* reset could have cleared these out, set them back */
1250 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1251 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1254 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1255 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1258 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1259 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1262 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1263 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1266 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1267 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
/* some phys clear out pause advertisement on reset, set it back */
1273 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1275 /* restart auto negotiation */
1276 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1277 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1278 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1285 static void nv_start_rx(struct net_device *dev)
1287 struct fe_priv *np = netdev_priv(dev);
1288 u8 __iomem *base = get_hwbase(dev);
1289 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1291 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1292 /* Already running? Stop it. */
1293 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1294 rx_ctrl &= ~NVREG_RCVCTL_START;
1295 writel(rx_ctrl, base + NvRegReceiverControl);
1298 writel(np->linkspeed, base + NvRegLinkSpeed);
1300 rx_ctrl |= NVREG_RCVCTL_START;
1302 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1303 writel(rx_ctrl, base + NvRegReceiverControl);
1304 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1305 dev->name, np->duplex, np->linkspeed);
1309 static void nv_stop_rx(struct net_device *dev)
1311 struct fe_priv *np = netdev_priv(dev);
1312 u8 __iomem *base = get_hwbase(dev);
1313 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1315 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1316 if (!np->mac_in_use)
1317 rx_ctrl &= ~NVREG_RCVCTL_START;
1319 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1320 writel(rx_ctrl, base + NvRegReceiverControl);
1321 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1322 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1323 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1325 udelay(NV_RXSTOP_DELAY2);
1326 if (!np->mac_in_use)
1327 writel(0, base + NvRegLinkSpeed);
1330 static void nv_start_tx(struct net_device *dev)
1332 struct fe_priv *np = netdev_priv(dev);
1333 u8 __iomem *base = get_hwbase(dev);
1334 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1336 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1337 tx_ctrl |= NVREG_XMITCTL_START;
1339 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1340 writel(tx_ctrl, base + NvRegTransmitterControl);
1344 static void nv_stop_tx(struct net_device *dev)
1346 struct fe_priv *np = netdev_priv(dev);
1347 u8 __iomem *base = get_hwbase(dev);
1348 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1350 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1351 if (!np->mac_in_use)
1352 tx_ctrl &= ~NVREG_XMITCTL_START;
1354 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1355 writel(tx_ctrl, base + NvRegTransmitterControl);
1356 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1357 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1358 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1360 udelay(NV_TXSTOP_DELAY2);
1361 if (!np->mac_in_use)
1362 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1363 base + NvRegTransmitPoll);
1366 static void nv_start_rxtx(struct net_device *dev)
1372 static void nv_stop_rxtx(struct net_device *dev)
1378 static void nv_txrx_reset(struct net_device *dev)
1380 struct fe_priv *np = netdev_priv(dev);
1381 u8 __iomem *base = get_hwbase(dev);
1383 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1384 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1386 udelay(NV_TXRX_RESET_DELAY);
1387 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1391 static void nv_mac_reset(struct net_device *dev)
1393 struct fe_priv *np = netdev_priv(dev);
1394 u8 __iomem *base = get_hwbase(dev);
1395 u32 temp1, temp2, temp3;
1397 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1399 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1402 /* save registers since they will be cleared on reset */
1403 temp1 = readl(base + NvRegMacAddrA);
1404 temp2 = readl(base + NvRegMacAddrB);
1405 temp3 = readl(base + NvRegTransmitPoll);
1407 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1409 udelay(NV_MAC_RESET_DELAY);
1410 writel(0, base + NvRegMacReset);
1412 udelay(NV_MAC_RESET_DELAY);
1414 /* restore saved registers */
1415 writel(temp1, base + NvRegMacAddrA);
1416 writel(temp2, base + NvRegMacAddrB);
1417 writel(temp3, base + NvRegTransmitPoll);
1419 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1423 static void nv_get_hw_stats(struct net_device *dev)
1425 struct fe_priv *np = netdev_priv(dev);
1426 u8 __iomem *base = get_hwbase(dev);
1428 np->estats.tx_bytes += readl(base + NvRegTxCnt);
1429 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1430 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1431 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1432 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1433 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1434 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1435 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1436 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1437 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1438 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1439 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1440 np->estats.rx_runt += readl(base + NvRegRxRunt);
1441 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1442 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1443 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1444 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1445 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1446 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1447 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1448 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1449 np->estats.rx_packets =
1450 np->estats.rx_unicast +
1451 np->estats.rx_multicast +
1452 np->estats.rx_broadcast;
1453 np->estats.rx_errors_total =
1454 np->estats.rx_crc_errors +
1455 np->estats.rx_over_errors +
1456 np->estats.rx_frame_error +
1457 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1458 np->estats.rx_late_collision +
1459 np->estats.rx_runt +
1460 np->estats.rx_frame_too_long;
1461 np->estats.tx_errors_total =
1462 np->estats.tx_late_collision +
1463 np->estats.tx_fifo_errors +
1464 np->estats.tx_carrier_errors +
1465 np->estats.tx_excess_deferral +
1466 np->estats.tx_retry_error;
1468 if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1469 np->estats.tx_deferral += readl(base + NvRegTxDef);
1470 np->estats.tx_packets += readl(base + NvRegTxFrame);
1471 np->estats.rx_bytes += readl(base + NvRegRxCnt);
1472 np->estats.tx_pause += readl(base + NvRegTxPause);
1473 np->estats.rx_pause += readl(base + NvRegRxPause);
1474 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1479 * nv_get_stats: dev->get_stats function
1480 * Get latest stats value from the nic.
1481 * Called with read_lock(&dev_base_lock) held for read -
1482 * only synchronized against unregister_netdevice.
1484 static struct net_device_stats *nv_get_stats(struct net_device *dev)
1486 struct fe_priv *np = netdev_priv(dev);
1488 /* If the nic supports hw counters then retrieve latest values */
1489 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
1490 nv_get_hw_stats(dev);
1492 /* copy to net_device stats */
1493 dev->stats.tx_bytes = np->estats.tx_bytes;
1494 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1495 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1496 dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
1497 dev->stats.rx_over_errors = np->estats.rx_over_errors;
1498 dev->stats.rx_errors = np->estats.rx_errors_total;
1499 dev->stats.tx_errors = np->estats.tx_errors_total;
1506 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors
1510 static int nv_alloc_rx(struct net_device *dev)
1512 struct fe_priv *np = netdev_priv(dev);
1513 struct ring_desc* less_rx;
1515 less_rx = np->get_rx.orig;
1516 if (less_rx-- == np->first_rx.orig)
1517 less_rx = np->last_rx.orig;
1519 while (np->put_rx.orig != less_rx) {
1520 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1522 np->put_rx_ctx->skb = skb;
1523 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1526 PCI_DMA_FROMDEVICE);
1527 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1528 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1530 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1531 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1532 np->put_rx.orig = np->first_rx.orig;
1533 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1534 np->put_rx_ctx = np->first_rx_ctx;
1542 static int nv_alloc_rx_optimized(struct net_device *dev)
1544 struct fe_priv *np = netdev_priv(dev);
1545 struct ring_desc_ex* less_rx;
1547 less_rx = np->get_rx.ex;
1548 if (less_rx-- == np->first_rx.ex)
1549 less_rx = np->last_rx.ex;
1551 while (np->put_rx.ex != less_rx) {
1552 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1554 np->put_rx_ctx->skb = skb;
1555 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1558 PCI_DMA_FROMDEVICE);
1559 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1560 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1561 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1563 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1564 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1565 np->put_rx.ex = np->first_rx.ex;
1566 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1567 np->put_rx_ctx = np->first_rx_ctx;
/* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
1576 #ifdef CONFIG_FORCEDETH_NAPI
1577 static void nv_do_rx_refill(unsigned long data)
1579 struct net_device *dev = (struct net_device *) data;
1580 struct fe_priv *np = netdev_priv(dev);
1582 /* Just reschedule NAPI rx processing */
1583 netif_rx_schedule(dev, &np->napi);
1586 static void nv_do_rx_refill(unsigned long data)
1588 struct net_device *dev = (struct net_device *) data;
1589 struct fe_priv *np = netdev_priv(dev);
1592 if (!using_multi_irqs(dev)) {
1593 if (np->msi_flags & NV_MSI_X_ENABLED)
1594 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1596 disable_irq(np->pci_dev->irq);
1598 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1600 if (!nv_optimized(np))
1601 retcode = nv_alloc_rx(dev);
1603 retcode = nv_alloc_rx_optimized(dev);
1605 spin_lock_irq(&np->lock);
1606 if (!np->in_shutdown)
1607 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1608 spin_unlock_irq(&np->lock);
1610 if (!using_multi_irqs(dev)) {
1611 if (np->msi_flags & NV_MSI_X_ENABLED)
1612 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1614 enable_irq(np->pci_dev->irq);
1616 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1621 static void nv_init_rx(struct net_device *dev)
1623 struct fe_priv *np = netdev_priv(dev);
1626 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1628 if (!nv_optimized(np))
1629 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1631 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1632 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1633 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1635 for (i = 0; i < np->rx_ring_size; i++) {
1636 if (!nv_optimized(np)) {
1637 np->rx_ring.orig[i].flaglen = 0;
1638 np->rx_ring.orig[i].buf = 0;
1640 np->rx_ring.ex[i].flaglen = 0;
1641 np->rx_ring.ex[i].txvlan = 0;
1642 np->rx_ring.ex[i].bufhigh = 0;
1643 np->rx_ring.ex[i].buflow = 0;
1645 np->rx_skb[i].skb = NULL;
1646 np->rx_skb[i].dma = 0;
1650 static void nv_init_tx(struct net_device *dev)
1652 struct fe_priv *np = netdev_priv(dev);
1655 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1657 if (!nv_optimized(np))
1658 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1660 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1661 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1662 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1663 np->tx_pkts_in_progress = 0;
1664 np->tx_change_owner = NULL;
1665 np->tx_end_flip = NULL;
1667 for (i = 0; i < np->tx_ring_size; i++) {
1668 if (!nv_optimized(np)) {
1669 np->tx_ring.orig[i].flaglen = 0;
1670 np->tx_ring.orig[i].buf = 0;
1672 np->tx_ring.ex[i].flaglen = 0;
1673 np->tx_ring.ex[i].txvlan = 0;
1674 np->tx_ring.ex[i].bufhigh = 0;
1675 np->tx_ring.ex[i].buflow = 0;
1677 np->tx_skb[i].skb = NULL;
1678 np->tx_skb[i].dma = 0;
1679 np->tx_skb[i].dma_len = 0;
1680 np->tx_skb[i].first_tx_desc = NULL;
1681 np->tx_skb[i].next_tx_ctx = NULL;
1685 static int nv_init_ring(struct net_device *dev)
1687 struct fe_priv *np = netdev_priv(dev);
1692 if (!nv_optimized(np))
1693 return nv_alloc_rx(dev);
1695 return nv_alloc_rx_optimized(dev);
1698 static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1700 struct fe_priv *np = netdev_priv(dev);
1703 pci_unmap_page(np->pci_dev, tx_skb->dma,
1709 dev_kfree_skb_any(tx_skb->skb);
1717 static void nv_drain_tx(struct net_device *dev)
1719 struct fe_priv *np = netdev_priv(dev);
1722 for (i = 0; i < np->tx_ring_size; i++) {
1723 if (!nv_optimized(np)) {
1724 np->tx_ring.orig[i].flaglen = 0;
1725 np->tx_ring.orig[i].buf = 0;
1727 np->tx_ring.ex[i].flaglen = 0;
1728 np->tx_ring.ex[i].txvlan = 0;
1729 np->tx_ring.ex[i].bufhigh = 0;
1730 np->tx_ring.ex[i].buflow = 0;
1732 if (nv_release_txskb(dev, &np->tx_skb[i]))
1733 dev->stats.tx_dropped++;
1734 np->tx_skb[i].dma = 0;
1735 np->tx_skb[i].dma_len = 0;
1736 np->tx_skb[i].first_tx_desc = NULL;
1737 np->tx_skb[i].next_tx_ctx = NULL;
1739 np->tx_pkts_in_progress = 0;
1740 np->tx_change_owner = NULL;
1741 np->tx_end_flip = NULL;
1744 static void nv_drain_rx(struct net_device *dev)
1746 struct fe_priv *np = netdev_priv(dev);
1749 for (i = 0; i < np->rx_ring_size; i++) {
1750 if (!nv_optimized(np)) {
1751 np->rx_ring.orig[i].flaglen = 0;
1752 np->rx_ring.orig[i].buf = 0;
1754 np->rx_ring.ex[i].flaglen = 0;
1755 np->rx_ring.ex[i].txvlan = 0;
1756 np->rx_ring.ex[i].bufhigh = 0;
1757 np->rx_ring.ex[i].buflow = 0;
1760 if (np->rx_skb[i].skb) {
1761 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1762 (skb_end_pointer(np->rx_skb[i].skb) -
1763 np->rx_skb[i].skb->data),
1764 PCI_DMA_FROMDEVICE);
1765 dev_kfree_skb(np->rx_skb[i].skb);
1766 np->rx_skb[i].skb = NULL;
1771 static void nv_drain_rxtx(struct net_device *dev)
1777 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1779 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
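/*
 * Worked example (a sketch): with tx_ring_size = 256 and put_tx_ctx
 * 10 entries ahead of get_tx_ctx, (256 + 10) % 256 = 10 descriptors are
 * in flight, leaving 246 empty slots. The "+ tx_ring_size ... %
 * tx_ring_size" keeps the count correct once put_tx_ctx wraps around
 * and the raw pointer difference goes negative.
 */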
1782 static void nv_legacybackoff_reseed(struct net_device *dev)
1784 u8 __iomem *base = get_hwbase(dev);
1789 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1790 get_random_bytes(&low, sizeof(low));
1791 reg |= low & NVREG_SLOTTIME_MASK;
1793 /* Need to stop tx before change takes effect.
1794 * Caller has already gained np->lock.
1796 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1800 writel(reg, base + NvRegSlotTime);
1806 /* Gear Backoff Seeds */
1807 #define BACKOFF_SEEDSET_ROWS 8
1808 #define BACKOFF_SEEDSET_LFSRS 15
1810 /* Known Good seed sets */
1811 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
1812 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
1813 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
1814 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
1815 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
1816 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
1817 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
1818 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
1819 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
1821 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
1822 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
1823 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
1824 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
1825 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
1826 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
1827 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
1828 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
1829 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
1831 static void nv_gear_backoff_reseed(struct net_device *dev)
1833 u8 __iomem *base = get_hwbase(dev);
1834 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
1835 u32 temp, seedset, combinedSeed;
/* Setup seed for free running LFSR */
/* We gather three random 12-bit mini-seeds (via get_random_bytes)
   and swizzle bits around to increase randomness */
1841 get_random_bytes(&miniseed1, sizeof(miniseed1));
1842 miniseed1 &= 0x0fff;
1846 get_random_bytes(&miniseed2, sizeof(miniseed2));
1847 miniseed2 &= 0x0fff;
1850 miniseed2_reversed =
1851 ((miniseed2 & 0xF00) >> 8) |
1852 (miniseed2 & 0x0F0) |
1853 ((miniseed2 & 0x00F) << 8);
1855 get_random_bytes(&miniseed3, sizeof(miniseed3));
1856 miniseed3 &= 0x0fff;
1859 miniseed3_reversed =
1860 ((miniseed3 & 0xF00) >> 8) |
1861 (miniseed3 & 0x0F0) |
1862 ((miniseed3 & 0x00F) << 8);
1864 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
1865 (miniseed2 ^ miniseed3_reversed);
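	/*
	 * Sketch of the layout produced above: combinedSeed packs
	 * (miniseed1 ^ miniseed2_reversed) into bits 23:12 and
	 * (miniseed2 ^ miniseed3_reversed) into bits 11:0; the checks
	 * below then make sure that neither seed field is left zero.
	 */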
/* Seeds cannot be zero */
1868 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
1869 combinedSeed |= 0x08;
1870 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
1871 combinedSeed |= 0x8000;
1873 /* No need to disable tx here */
1874 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
1875 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
1876 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
writel(temp, base + NvRegBackOffControl);
1879 /* Setup seeds for all gear LFSRs. */
1880 get_random_bytes(&seedset, sizeof(seedset));
1881 seedset = seedset % BACKOFF_SEEDSET_ROWS;
1882 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
1884 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
1885 temp |= main_seedset[seedset][i-1] & 0x3ff;
1886 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
1887 writel(temp, base + NvRegBackOffControl);
1892 * nv_start_xmit: dev->hard_start_xmit function
1893 * Called with netif_tx_lock held.
1895 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1897 struct fe_priv *np = netdev_priv(dev);
1899 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1900 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1904 u32 size = skb->len-skb->data_len;
1905 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
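	/*
	 * Worked example (a sketch): NV_TX2_TSO_MAX_SIZE is 16384, so a
	 * 40000 byte linear area needs (40000 >> 14) = 2 full descriptors
	 * plus one more for the 7232 byte remainder, i.e. entries = 3.
	 */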
1907 struct ring_desc* put_tx;
1908 struct ring_desc* start_tx;
1909 struct ring_desc* prev_tx;
1910 struct nv_skb_map* prev_tx_ctx;
1911 unsigned long flags;
1913 /* add fragments to entries count */
1914 for (i = 0; i < fragments; i++) {
1915 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1916 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1919 empty_slots = nv_get_empty_tx_slots(np);
1920 if (unlikely(empty_slots <= entries)) {
1921 spin_lock_irqsave(&np->lock, flags);
1922 netif_stop_queue(dev);
1924 spin_unlock_irqrestore(&np->lock, flags);
1925 return NETDEV_TX_BUSY;
1928 start_tx = put_tx = np->put_tx.orig;
1930 /* setup the header buffer */
1933 prev_tx_ctx = np->put_tx_ctx;
1934 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1935 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1937 np->put_tx_ctx->dma_len = bcnt;
1938 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1939 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1941 tx_flags = np->tx_flags;
1944 if (unlikely(put_tx++ == np->last_tx.orig))
1945 put_tx = np->first_tx.orig;
1946 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1947 np->put_tx_ctx = np->first_tx_ctx;
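/* Ring advance idiom: the post-increment compare wraps to the first
 * descriptor once the last one has been used. An equivalent, more
 * explicit sketch:
 *   if (put_tx == np->last_tx.orig)
 *           put_tx = np->first_tx.orig;
 *   else
 *           put_tx++;
 */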
1950 /* setup the fragments */
1951 for (i = 0; i < fragments; i++) {
1952 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1953 u32 size = frag->size;
1958 prev_tx_ctx = np->put_tx_ctx;
1959 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1960 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1962 np->put_tx_ctx->dma_len = bcnt;
1963 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1964 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1968 if (unlikely(put_tx++ == np->last_tx.orig))
1969 put_tx = np->first_tx.orig;
1970 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1971 np->put_tx_ctx = np->first_tx_ctx;
1975 /* set last fragment flag */
1976 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1978 /* save skb in this slot's context area */
1979 prev_tx_ctx->skb = skb;
1981 if (skb_is_gso(skb))
1982 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1984 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1985 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1987 spin_lock_irqsave(&np->lock, flags);
1990 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1991 np->put_tx.orig = put_tx;
1993 spin_unlock_irqrestore(&np->lock, flags);
1995 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1996 dev->name, entries, tx_flags_extra);
1999 for (j=0; j<64; j++) {
2001 dprintk("\n%03x:", j);
2002 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2007 dev->trans_start = jiffies;
2008 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2009 return NETDEV_TX_OK;
2012 static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2014 struct fe_priv *np = netdev_priv(dev);
2017 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2021 u32 size = skb->len-skb->data_len;
2022 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2024 struct ring_desc_ex* put_tx;
2025 struct ring_desc_ex* start_tx;
2026 struct ring_desc_ex* prev_tx;
2027 struct nv_skb_map* prev_tx_ctx;
2028 struct nv_skb_map* start_tx_ctx;
2029 unsigned long flags;
2031 /* add fragments to entries count */
2032 for (i = 0; i < fragments; i++) {
2033 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2034 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2037 empty_slots = nv_get_empty_tx_slots(np);
2038 if (unlikely(empty_slots <= entries)) {
2039 spin_lock_irqsave(&np->lock, flags);
2040 netif_stop_queue(dev);
2042 spin_unlock_irqrestore(&np->lock, flags);
2043 return NETDEV_TX_BUSY;
2046 start_tx = put_tx = np->put_tx.ex;
2047 start_tx_ctx = np->put_tx_ctx;
2049 /* setup the header buffer */
2052 prev_tx_ctx = np->put_tx_ctx;
2053 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2054 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2056 np->put_tx_ctx->dma_len = bcnt;
2057 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2058 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2059 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2061 tx_flags = NV_TX2_VALID;
2064 if (unlikely(put_tx++ == np->last_tx.ex))
2065 put_tx = np->first_tx.ex;
2066 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2067 np->put_tx_ctx = np->first_tx_ctx;
2070 /* setup the fragments */
2071 for (i = 0; i < fragments; i++) {
2072 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2073 u32 size = frag->size;
2078 prev_tx_ctx = np->put_tx_ctx;
2079 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2080 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2082 np->put_tx_ctx->dma_len = bcnt;
2083 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2084 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2085 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2089 if (unlikely(put_tx++ == np->last_tx.ex))
2090 put_tx = np->first_tx.ex;
2091 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2092 np->put_tx_ctx = np->first_tx_ctx;
2096 /* set last fragment flag */
2097 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2099 /* save skb in this slot's context area */
2100 prev_tx_ctx->skb = skb;
2102 if (skb_is_gso(skb))
2103 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2105 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2106 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2109 if (likely(!np->vlangrp)) {
2110 start_tx->txvlan = 0;
2112 if (vlan_tx_tag_present(skb))
2113 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2115 start_tx->txvlan = 0;
2118 spin_lock_irqsave(&np->lock, flags);
2121 /* Limit the number of outstanding tx packets. Set up all fragments, but
2122 * do not set the VALID bit on the first descriptor. Save a pointer
2123 * to that descriptor and also to the next skb_map element.
2126 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2127 if (!np->tx_change_owner)
2128 np->tx_change_owner = start_tx_ctx;
2130 /* remove VALID bit */
2131 tx_flags &= ~NV_TX2_VALID;
2132 start_tx_ctx->first_tx_desc = start_tx;
2133 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2134 np->tx_end_flip = np->put_tx_ctx;
2136 np->tx_pkts_in_progress++;
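/* Throttle sketch, as this block reads: once NV_TX_LIMIT_COUNT
 * packets are in flight the new packet is fully built but its first
 * descriptor is left without NV_TX2_VALID, so the hardware skips it.
 * nv_tx_flip_ownership() below re-arms one such packet per completed
 * transmit, bounding the hardware-owned backlog. */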
2141 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2142 np->put_tx.ex = put_tx;
2144 spin_unlock_irqrestore(&np->lock, flags);
2146 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2147 dev->name, entries, tx_flags_extra);
2150 for (j=0; j<64; j++) {
2152 dprintk("\n%03x:", j);
2153 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2158 dev->trans_start = jiffies;
2159 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2160 return NETDEV_TX_OK;
2163 static inline void nv_tx_flip_ownership(struct net_device *dev)
2165 struct fe_priv *np = netdev_priv(dev);
2167 np->tx_pkts_in_progress--;
2168 if (np->tx_change_owner) {
2169 np->tx_change_owner->first_tx_desc->flaglen |=
2170 cpu_to_le32(NV_TX2_VALID);
2171 np->tx_pkts_in_progress++;
2173 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2174 if (np->tx_change_owner == np->tx_end_flip)
2175 np->tx_change_owner = NULL;
2177 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2182 * nv_tx_done: check for completed packets, release the skbs.
2184 * Caller must own np->lock.
2186 static void nv_tx_done(struct net_device *dev)
2188 struct fe_priv *np = netdev_priv(dev);
2190 struct ring_desc* orig_get_tx = np->get_tx.orig;
2192 while ((np->get_tx.orig != np->put_tx.orig) &&
2193 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
2195 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2198 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2199 np->get_tx_ctx->dma_len,
2201 np->get_tx_ctx->dma = 0;
2203 if (np->desc_ver == DESC_VER_1) {
2204 if (flags & NV_TX_LASTPACKET) {
2205 if (flags & NV_TX_ERROR) {
2206 if (flags & NV_TX_UNDERFLOW)
2207 dev->stats.tx_fifo_errors++;
2208 if (flags & NV_TX_CARRIERLOST)
2209 dev->stats.tx_carrier_errors++;
2210 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2211 nv_legacybackoff_reseed(dev);
2212 dev->stats.tx_errors++;
2214 dev->stats.tx_packets++;
2215 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2217 dev_kfree_skb_any(np->get_tx_ctx->skb);
2218 np->get_tx_ctx->skb = NULL;
2221 if (flags & NV_TX2_LASTPACKET) {
2222 if (flags & NV_TX2_ERROR) {
2223 if (flags & NV_TX2_UNDERFLOW)
2224 dev->stats.tx_fifo_errors++;
2225 if (flags & NV_TX2_CARRIERLOST)
2226 dev->stats.tx_carrier_errors++;
2227 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2228 nv_legacybackoff_reseed(dev);
2229 dev->stats.tx_errors++;
2231 dev->stats.tx_packets++;
2232 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2234 dev_kfree_skb_any(np->get_tx_ctx->skb);
2235 np->get_tx_ctx->skb = NULL;
2238 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2239 np->get_tx.orig = np->first_tx.orig;
2240 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2241 np->get_tx_ctx = np->first_tx_ctx;
2243 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2245 netif_wake_queue(dev);
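/* The queue is only woken if it was stopped for lack of slots
 * (tx_stop) and this pass actually reclaimed descriptors (get_tx
 * moved), which avoids spurious wakeups. */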
2249 static void nv_tx_done_optimized(struct net_device *dev, int limit)
2251 struct fe_priv *np = netdev_priv(dev);
2253 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2255 while ((np->get_tx.ex != np->put_tx.ex) &&
2256 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
2259 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2262 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2263 np->get_tx_ctx->dma_len,
2265 np->get_tx_ctx->dma = 0;
2267 if (flags & NV_TX2_LASTPACKET) {
2268 if (!(flags & NV_TX2_ERROR))
2269 dev->stats.tx_packets++;
2271 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2272 if (np->driver_data & DEV_HAS_GEAR_MODE)
2273 nv_gear_backoff_reseed(dev);
2275 nv_legacybackoff_reseed(dev);
2279 dev_kfree_skb_any(np->get_tx_ctx->skb);
2280 np->get_tx_ctx->skb = NULL;
2283 nv_tx_flip_ownership(dev);
2286 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2287 np->get_tx.ex = np->first_tx.ex;
2288 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2289 np->get_tx_ctx = np->first_tx_ctx;
2291 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2293 netif_wake_queue(dev);
2298 * nv_tx_timeout: dev->tx_timeout function
2299 * Called with netif_tx_lock held.
2301 static void nv_tx_timeout(struct net_device *dev)
2303 struct fe_priv *np = netdev_priv(dev);
2304 u8 __iomem *base = get_hwbase(dev);
2307 if (np->msi_flags & NV_MSI_X_ENABLED)
2308 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2310 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2312 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2317 printk(KERN_INFO "%s: Ring at %lx\n",
2318 dev->name, (unsigned long)np->ring_addr);
2319 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2320 for (i = 0; i <= np->register_size; i += 32) {
2321 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2323 readl(base + i + 0), readl(base + i + 4),
2324 readl(base + i + 8), readl(base + i + 12),
2325 readl(base + i + 16), readl(base + i + 20),
2326 readl(base + i + 24), readl(base + i + 28));
2328 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2329 for (i = 0; i < np->tx_ring_size; i += 4) {
2330 if (!nv_optimized(np)) {
2331 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2333 le32_to_cpu(np->tx_ring.orig[i].buf),
2334 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2335 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2336 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2337 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2338 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2339 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2340 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2342 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2344 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2345 le32_to_cpu(np->tx_ring.ex[i].buflow),
2346 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2347 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2348 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2349 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2350 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2351 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2352 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2353 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2354 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2355 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2360 spin_lock_irq(&np->lock);
2362 /* 1) stop tx engine */
2365 /* 2) check that the packets were not sent already: */
2366 if (!nv_optimized(np))
2369 nv_tx_done_optimized(dev, np->tx_ring_size);
2371 /* 3) if there are dead entries: clear everything */
2372 if (np->get_tx_ctx != np->put_tx_ctx) {
2373 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2376 setup_hw_rings(dev, NV_SETUP_TX_RING);
2379 netif_wake_queue(dev);
2381 /* 4) restart tx engine */
2383 spin_unlock_irq(&np->lock);
2387 * Called when the nic notices a mismatch between the actual data len on the
2388 * wire and the len indicated in the 802 header
2390 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2392 int hdrlen; /* length of the 802 header */
2393 int protolen; /* length as stored in the proto field */
2395 /* 1) calculate len according to header */
2396 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2397 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2400 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2403 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2404 dev->name, datalen, protolen, hdrlen);
2405 if (protolen > ETH_DATA_LEN)
2406 return datalen; /* Value in proto field not a len, no checks possible */
2409 /* consistency checks: */
2410 if (datalen > ETH_ZLEN) {
2411 if (datalen >= protolen) {
2412 /* more data on wire than in 802 header, trim off
2415 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2416 dev->name, protolen);
2419 /* less data on wire than mentioned in header.
2420 * Discard the packet.
2422 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2427 /* short packet. Accept only if 802 values are also short */
2428 if (protolen > ETH_ZLEN) {
2429 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2433 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2434 dev->name, datalen);
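/* Worked example (frame sizes assumed): a 100-byte frame whose 802
 * length field says 60 is trimmed to 60 bytes; a 50-byte frame whose
 * length field says 200 is discarded, since less arrived than the
 * header claims. Length-field values above ETH_DATA_LEN are EtherType
 * protocol IDs, not lengths, so datalen is returned unchecked. */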
2439 static int nv_rx_process(struct net_device *dev, int limit)
2441 struct fe_priv *np = netdev_priv(dev);
2444 struct sk_buff *skb;
2447 while((np->get_rx.orig != np->put_rx.orig) &&
2448 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2449 (rx_work < limit)) {
2451 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2455 * the packet is for us - immediately tear down the pci mapping.
2456 * TODO: check if a prefetch of the first cacheline improves performance.
2459 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2460 np->get_rx_ctx->dma_len,
2461 PCI_DMA_FROMDEVICE);
2462 skb = np->get_rx_ctx->skb;
2463 np->get_rx_ctx->skb = NULL;
2467 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2468 for (j=0; j<64; j++) {
2470 dprintk("\n%03x:", j);
2471 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2475 /* look at what we actually got: */
2476 if (np->desc_ver == DESC_VER_1) {
2477 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2478 len = flags & LEN_MASK_V1;
2479 if (unlikely(flags & NV_RX_ERROR)) {
2480 if (flags & NV_RX_ERROR4) {
2481 len = nv_getlen(dev, skb->data, len);
2483 dev->stats.rx_errors++;
2488 /* framing errors are soft errors */
2489 else if (flags & NV_RX_FRAMINGERR) {
2490 if (flags & NV_RX_SUBSTRACT1) {
2494 /* the rest are hard errors */
2496 if (flags & NV_RX_MISSEDFRAME)
2497 dev->stats.rx_missed_errors++;
2498 if (flags & NV_RX_CRCERR)
2499 dev->stats.rx_crc_errors++;
2500 if (flags & NV_RX_OVERFLOW)
2501 dev->stats.rx_over_errors++;
2502 dev->stats.rx_errors++;
2512 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2513 len = flags & LEN_MASK_V2;
2514 if (unlikely(flags & NV_RX2_ERROR)) {
2515 if (flags & NV_RX2_ERROR4) {
2516 len = nv_getlen(dev, skb->data, len);
2518 dev->stats.rx_errors++;
2523 /* framing errors are soft errors */
2524 else if (flags & NV_RX2_FRAMINGERR) {
2525 if (flags & NV_RX2_SUBSTRACT1) {
2529 /* the rest are hard errors */
2531 if (flags & NV_RX2_CRCERR)
2532 dev->stats.rx_crc_errors++;
2533 if (flags & NV_RX2_OVERFLOW)
2534 dev->stats.rx_over_errors++;
2535 dev->stats.rx_errors++;
2540 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2541 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2542 skb->ip_summed = CHECKSUM_UNNECESSARY;
2548 /* got a valid packet - forward it to the network core */
2550 skb->protocol = eth_type_trans(skb, dev);
2551 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2552 dev->name, len, skb->protocol);
2553 #ifdef CONFIG_FORCEDETH_NAPI
2554 netif_receive_skb(skb);
2558 dev->last_rx = jiffies;
2559 dev->stats.rx_packets++;
2560 dev->stats.rx_bytes += len;
2562 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2563 np->get_rx.orig = np->first_rx.orig;
2564 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2565 np->get_rx_ctx = np->first_rx_ctx;
2573 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2575 struct fe_priv *np = netdev_priv(dev);
2579 struct sk_buff *skb;
2582 while((np->get_rx.ex != np->put_rx.ex) &&
2583 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2584 (rx_work < limit)) {
2586 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2590 * the packet is for us - immediately tear down the pci mapping.
2591 * TODO: check if a prefetch of the first cacheline improves performance.
2594 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2595 np->get_rx_ctx->dma_len,
2596 PCI_DMA_FROMDEVICE);
2597 skb = np->get_rx_ctx->skb;
2598 np->get_rx_ctx->skb = NULL;
2602 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2603 for (j=0; j<64; j++) {
2605 dprintk("\n%03x:", j);
2606 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2610 /* look at what we actually got: */
2611 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2612 len = flags & LEN_MASK_V2;
2613 if (unlikely(flags & NV_RX2_ERROR)) {
2614 if (flags & NV_RX2_ERROR4) {
2615 len = nv_getlen(dev, skb->data, len);
2621 /* framing errors are soft errors */
2622 else if (flags & NV_RX2_FRAMINGERR) {
2623 if (flags & NV_RX2_SUBSTRACT1) {
2627 /* the rest are hard errors */
2634 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2635 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2636 skb->ip_summed = CHECKSUM_UNNECESSARY;
2638 /* got a valid packet - forward it to the network core */
2640 skb->protocol = eth_type_trans(skb, dev);
2641 prefetch(skb->data);
2643 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2644 dev->name, len, skb->protocol);
2646 if (likely(!np->vlangrp)) {
2647 #ifdef CONFIG_FORCEDETH_NAPI
2648 netif_receive_skb(skb);
2653 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2654 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2655 #ifdef CONFIG_FORCEDETH_NAPI
2656 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2657 vlanflags & NV_RX3_VLAN_TAG_MASK);
2659 vlan_hwaccel_rx(skb, np->vlangrp,
2660 vlanflags & NV_RX3_VLAN_TAG_MASK);
2663 #ifdef CONFIG_FORCEDETH_NAPI
2664 netif_receive_skb(skb);
2671 dev->last_rx = jiffies;
2672 dev->stats.rx_packets++;
2673 dev->stats.rx_bytes += len;
2678 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2679 np->get_rx.ex = np->first_rx.ex;
2680 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2681 np->get_rx_ctx = np->first_rx_ctx;
2689 static void set_bufsize(struct net_device *dev)
2691 struct fe_priv *np = netdev_priv(dev);
2693 if (dev->mtu <= ETH_DATA_LEN)
2694 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2696 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
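/* The receive buffer is the MTU-sized payload plus NV_RX_HEADERS of
 * slack for link-layer framing; any MTU at or below ETH_DATA_LEN
 * shares the one standard buffer size. */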
2700 * nv_change_mtu: dev->change_mtu function
2701 * Called with dev_base_lock held for read.
2703 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2705 struct fe_priv *np = netdev_priv(dev);
2708 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2714 /* return early if the buffer sizes will not change */
2715 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2717 if (old_mtu == new_mtu)
2720 /* synchronized against open: rtnl_lock() held by caller */
2721 if (netif_running(dev)) {
2722 u8 __iomem *base = get_hwbase(dev);
2724 * It seems that the nic preloads valid ring entries into an
2725 * internal buffer. The procedure for flushing everything is
2726 * guessed; there is probably a simpler approach.
2727 * Changing the MTU is a rare event, it shouldn't matter.
2729 nv_disable_irq(dev);
2730 netif_tx_lock_bh(dev);
2731 spin_lock(&np->lock);
2735 /* drain rx queue */
2737 /* reinit driver view of the rx queue */
2739 if (nv_init_ring(dev)) {
2740 if (!np->in_shutdown)
2741 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2743 /* reinit nic view of the rx queue */
2744 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2745 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2746 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2747 base + NvRegRingSizes);
2749 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2752 /* restart rx engine */
2754 spin_unlock(&np->lock);
2755 netif_tx_unlock_bh(dev);
2761 static void nv_copy_mac_to_hw(struct net_device *dev)
2763 u8 __iomem *base = get_hwbase(dev);
2766 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2767 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2768 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2770 writel(mac[0], base + NvRegMacAddrA);
2771 writel(mac[1], base + NvRegMacAddrB);
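/* Hedged illustration: for dev_addr 00:11:22:33:44:55 the packing
 * above yields mac[0] = 0x33221100 and mac[1] = 0x00005544, i.e. the
 * address in little endian order across the MacAddrA/B registers. */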
2775 * nv_set_mac_address: dev->set_mac_address function
2776 * Called with rtnl_lock() held.
2778 static int nv_set_mac_address(struct net_device *dev, void *addr)
2780 struct fe_priv *np = netdev_priv(dev);
2781 struct sockaddr *macaddr = (struct sockaddr*)addr;
2783 if (!is_valid_ether_addr(macaddr->sa_data))
2784 return -EADDRNOTAVAIL;
2786 /* synchronized against open: rtnl_lock() held by caller */
2787 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2789 if (netif_running(dev)) {
2790 netif_tx_lock_bh(dev);
2791 spin_lock_irq(&np->lock);
2793 /* stop rx engine */
2796 /* set mac address */
2797 nv_copy_mac_to_hw(dev);
2799 /* restart rx engine */
2801 spin_unlock_irq(&np->lock);
2802 netif_tx_unlock_bh(dev);
2804 nv_copy_mac_to_hw(dev);
2810 * nv_set_multicast: dev->set_multicast function
2811 * Called with netif_tx_lock held.
2813 static void nv_set_multicast(struct net_device *dev)
2815 struct fe_priv *np = netdev_priv(dev);
2816 u8 __iomem *base = get_hwbase(dev);
2819 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2821 memset(addr, 0, sizeof(addr));
2822 memset(mask, 0, sizeof(mask));
2824 if (dev->flags & IFF_PROMISC) {
2825 pff |= NVREG_PFF_PROMISC;
2827 pff |= NVREG_PFF_MYADDR;
2829 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
2833 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2834 if (dev->flags & IFF_ALLMULTI) {
2835 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2837 struct dev_mc_list *walk;
2839 walk = dev->mc_list;
2840 while (walk != NULL) {
2842 a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
2843 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
2851 addr[0] = alwaysOn[0];
2852 addr[1] = alwaysOn[1];
2853 mask[0] = alwaysOn[0] | alwaysOff[0];
2854 mask[1] = alwaysOn[1] | alwaysOff[1];
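/* As understood from this path: alwaysOn collects the bits set in
 * every listed multicast address, alwaysOff the bits clear in every
 * one; addr = alwaysOn is the common pattern and
 * mask = alwaysOn | alwaysOff marks the bit positions on which all
 * addresses agree. IFF_ALLMULTI clears both, matching everything. */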
2856 mask[0] = NVREG_MCASTMASKA_NONE;
2857 mask[1] = NVREG_MCASTMASKB_NONE;
2860 addr[0] |= NVREG_MCASTADDRA_FORCE;
2861 pff |= NVREG_PFF_ALWAYS;
2862 spin_lock_irq(&np->lock);
2864 writel(addr[0], base + NvRegMulticastAddrA);
2865 writel(addr[1], base + NvRegMulticastAddrB);
2866 writel(mask[0], base + NvRegMulticastMaskA);
2867 writel(mask[1], base + NvRegMulticastMaskB);
2868 writel(pff, base + NvRegPacketFilterFlags);
2869 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
2872 spin_unlock_irq(&np->lock);
2875 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2877 struct fe_priv *np = netdev_priv(dev);
2878 u8 __iomem *base = get_hwbase(dev);
2880 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2882 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2883 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2884 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2885 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2886 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2888 writel(pff, base + NvRegPacketFilterFlags);
2891 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2892 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2893 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2894 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
2895 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
2896 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
2897 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
2898 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
2899 writel(pause_enable, base + NvRegTxPauseFrame);
2900 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2901 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2903 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
2904 writel(regmisc, base + NvRegMisc1);
2910 * nv_update_linkspeed: Setup the MAC according to the link partner
2911 * @dev: Network device to be configured
2913 * The function queries the PHY and checks if there is a link partner.
2914 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
2915 * set to 10 MBit HD.
2917 * The function returns 0 if there is no link partner and 1 if there is
2918 * a good link partner.
2920 static int nv_update_linkspeed(struct net_device *dev)
2922 struct fe_priv *np = netdev_priv(dev);
2923 u8 __iomem *base = get_hwbase(dev);
2926 int adv_lpa, adv_pause, lpa_pause;
2927 int newls = np->linkspeed;
2928 int newdup = np->duplex;
2931 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2935 /* BMSR_LSTATUS is latched, read it twice:
2936 * we want the current value.
2938 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2939 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2941 if (!(mii_status & BMSR_LSTATUS)) {
2942 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
2944 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2950 if (np->autoneg == 0) {
2951 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
2952 dev->name, np->fixed_mode);
2953 if (np->fixed_mode & LPA_100FULL) {
2954 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2956 } else if (np->fixed_mode & LPA_100HALF) {
2957 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2959 } else if (np->fixed_mode & LPA_10FULL) {
2960 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2963 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2969 /* check auto negotiation is complete */
2970 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
2971 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
2972 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2975 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
2979 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2980 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2981 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2982 dev->name, adv, lpa);
2985 if (np->gigabit == PHY_GIGABIT) {
2986 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2987 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
2989 if ((control_1000 & ADVERTISE_1000FULL) &&
2990 (status_1000 & LPA_1000FULL)) {
2991 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
2993 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
2999 /* FIXME: handle parallel detection properly */
3000 adv_lpa = lpa & adv;
3001 if (adv_lpa & LPA_100FULL) {
3002 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3004 } else if (adv_lpa & LPA_100HALF) {
3005 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3007 } else if (adv_lpa & LPA_10FULL) {
3008 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3010 } else if (adv_lpa & LPA_10HALF) {
3011 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3014 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3015 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3020 if (np->duplex == newdup && np->linkspeed == newls)
3023 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3024 dev->name, np->linkspeed, np->duplex, newls, newdup);
3026 np->duplex = newdup;
3027 np->linkspeed = newls;
3029 /* The transmitter and receiver must be restarted for safe update */
3030 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3031 txrxFlags |= NV_RESTART_TX;
3034 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3035 txrxFlags |= NV_RESTART_RX;
3039 if (np->gigabit == PHY_GIGABIT) {
3040 phyreg = readl(base + NvRegSlotTime);
3041 phyreg &= ~(0x3FF00);
3042 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3043 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3044 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3045 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3046 phyreg |= NVREG_SLOTTIME_1000_FULL;
3047 writel(phyreg, base + NvRegSlotTime);
3050 phyreg = readl(base + NvRegPhyInterface);
3051 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3052 if (np->duplex == 0)
3054 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3056 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3058 writel(phyreg, base + NvRegPhyInterface);
3060 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3061 if (phyreg & PHY_RGMII) {
3062 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3063 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3065 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3066 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3067 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3069 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3071 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3075 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3076 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3078 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3080 writel(txreg, base + NvRegTxDeferral);
3082 if (np->desc_ver == DESC_VER_1) {
3083 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3085 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3086 txreg = NVREG_TX_WM_DESC2_3_1000;
3088 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3090 writel(txreg, base + NvRegTxWatermark);
3092 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
3095 writel(np->linkspeed, base + NvRegLinkSpeed);
3099 /* setup pause frame */
3100 if (np->duplex != 0) {
3101 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3102 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
3103 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
3105 switch (adv_pause) {
3106 case ADVERTISE_PAUSE_CAP:
3107 if (lpa_pause & LPA_PAUSE_CAP) {
3108 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3109 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3110 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3113 case ADVERTISE_PAUSE_ASYM:
3114 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
3116 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3119 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
3120 if (lpa_pause & LPA_PAUSE_CAP)
3122 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3123 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3124 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3126 if (lpa_pause == LPA_PAUSE_ASYM)
3128 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3133 pause_flags = np->pause_flags;
3136 nv_update_pause(dev, pause_flags);
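/* The switch above appears to follow the usual 802.3 pause
 * resolution from the local (adv) and partner (lpa) bits, roughly:
 *   adv PAUSE        + lpa PAUSE        -> rx pause (tx if requested)
 *   adv ASYM         + lpa PAUSE+ASYM   -> tx pause only
 *   adv PAUSE+ASYM   + lpa PAUSE        -> rx pause (tx if requested)
 *   adv PAUSE+ASYM   + lpa ASYM only    -> rx pause only
 */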
3138 if (txrxFlags & NV_RESTART_TX)
3140 if (txrxFlags & NV_RESTART_RX)
3146 static void nv_linkchange(struct net_device *dev)
3148 if (nv_update_linkspeed(dev)) {
3149 if (!netif_carrier_ok(dev)) {
3150 netif_carrier_on(dev);
3151 printk(KERN_INFO "%s: link up.\n", dev->name);
3155 if (netif_carrier_ok(dev)) {
3156 netif_carrier_off(dev);
3157 printk(KERN_INFO "%s: link down.\n", dev->name);
3163 static void nv_link_irq(struct net_device *dev)
3165 u8 __iomem *base = get_hwbase(dev);
3168 miistat = readl(base + NvRegMIIStatus);
3169 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3170 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3172 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3174 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3177 static irqreturn_t nv_nic_irq(int foo, void *data)
3179 struct net_device *dev = (struct net_device *) data;
3180 struct fe_priv *np = netdev_priv(dev);
3181 u8 __iomem *base = get_hwbase(dev);
3185 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3188 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3189 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3190 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3192 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3193 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3195 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3196 if (!(events & np->irqmask))
3199 spin_lock(&np->lock);
3201 spin_unlock(&np->lock);
3203 #ifdef CONFIG_FORCEDETH_NAPI
3204 if (events & NVREG_IRQ_RX_ALL) {
3205 netif_rx_schedule(dev, &np->napi);
3207 /* Disable further receive irqs */
3208 spin_lock(&np->lock);
3209 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3211 if (np->msi_flags & NV_MSI_X_ENABLED)
3212 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3214 writel(np->irqmask, base + NvRegIrqMask);
3215 spin_unlock(&np->lock);
3218 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
3219 if (unlikely(nv_alloc_rx(dev))) {
3220 spin_lock(&np->lock);
3221 if (!np->in_shutdown)
3222 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3223 spin_unlock(&np->lock);
3227 if (unlikely(events & NVREG_IRQ_LINK)) {
3228 spin_lock(&np->lock);
3230 spin_unlock(&np->lock);
3232 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3233 spin_lock(&np->lock);
3235 spin_unlock(&np->lock);
3236 np->link_timeout = jiffies + LINK_TIMEOUT;
3238 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3239 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3242 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3243 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3246 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3247 spin_lock(&np->lock);
3248 /* disable interrupts on the nic */
3249 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3250 writel(0, base + NvRegIrqMask);
3252 writel(np->irqmask, base + NvRegIrqMask);
3255 if (!np->in_shutdown) {
3256 np->nic_poll_irq = np->irqmask;
3257 np->recover_error = 1;
3258 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3260 spin_unlock(&np->lock);
3263 if (unlikely(i > max_interrupt_work)) {
3264 spin_lock(&np->lock);
3265 /* disable interrupts on the nic */
3266 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3267 writel(0, base + NvRegIrqMask);
3269 writel(np->irqmask, base + NvRegIrqMask);
3272 if (!np->in_shutdown) {
3273 np->nic_poll_irq = np->irqmask;
3274 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3276 spin_unlock(&np->lock);
3277 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3282 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3284 return IRQ_RETVAL(i);
3288 * All _optimized functions are used to help increase performance
3289 * (reduce CPU load and increase throughput). They use descriptor version 3,
3290 * compiler directives, and reduce memory accesses.
3292 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3294 struct net_device *dev = (struct net_device *) data;
3295 struct fe_priv *np = netdev_priv(dev);
3296 u8 __iomem *base = get_hwbase(dev);
3300 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3303 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3304 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3305 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3307 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3308 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3310 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3311 if (!(events & np->irqmask))
3314 spin_lock(&np->lock);
3315 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3316 spin_unlock(&np->lock);
3318 #ifdef CONFIG_FORCEDETH_NAPI
3319 if (events & NVREG_IRQ_RX_ALL) {
3320 netif_rx_schedule(dev, &np->napi);
3322 /* Disable further receive irqs */
3323 spin_lock(&np->lock);
3324 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3326 if (np->msi_flags & NV_MSI_X_ENABLED)
3327 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3329 writel(np->irqmask, base + NvRegIrqMask);
3330 spin_unlock(&np->lock);
3333 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3334 if (unlikely(nv_alloc_rx_optimized(dev))) {
3335 spin_lock(&np->lock);
3336 if (!np->in_shutdown)
3337 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3338 spin_unlock(&np->lock);
3342 if (unlikely(events & NVREG_IRQ_LINK)) {
3343 spin_lock(&np->lock);
3345 spin_unlock(&np->lock);
3347 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3348 spin_lock(&np->lock);
3350 spin_unlock(&np->lock);
3351 np->link_timeout = jiffies + LINK_TIMEOUT;
3353 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3354 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3357 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3358 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3361 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3362 spin_lock(&np->lock);
3363 /* disable interrupts on the nic */
3364 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3365 writel(0, base + NvRegIrqMask);
3367 writel(np->irqmask, base + NvRegIrqMask);
3370 if (!np->in_shutdown) {
3371 np->nic_poll_irq = np->irqmask;
3372 np->recover_error = 1;
3373 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3375 spin_unlock(&np->lock);
3379 if (unlikely(i > max_interrupt_work)) {
3380 spin_lock(&np->lock);
3381 /* disable interrupts on the nic */
3382 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3383 writel(0, base + NvRegIrqMask);
3385 writel(np->irqmask, base + NvRegIrqMask);
3388 if (!np->in_shutdown) {
3389 np->nic_poll_irq = np->irqmask;
3390 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3392 spin_unlock(&np->lock);
3393 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i);
3398 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3400 return IRQ_RETVAL(i);
3403 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3405 struct net_device *dev = (struct net_device *) data;
3406 struct fe_priv *np = netdev_priv(dev);
3407 u8 __iomem *base = get_hwbase(dev);
3410 unsigned long flags;
3412 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3415 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3416 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3417 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3418 if (!(events & np->irqmask))
3421 spin_lock_irqsave(&np->lock, flags);
3422 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3423 spin_unlock_irqrestore(&np->lock, flags);
3425 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3426 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3429 if (unlikely(i > max_interrupt_work)) {
3430 spin_lock_irqsave(&np->lock, flags);
3431 /* disable interrupts on the nic */
3432 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3435 if (!np->in_shutdown) {
3436 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3437 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3439 spin_unlock_irqrestore(&np->lock, flags);
3440 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3445 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3447 return IRQ_RETVAL(i);
3450 #ifdef CONFIG_FORCEDETH_NAPI
3451 static int nv_napi_poll(struct napi_struct *napi, int budget)
3453 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3454 struct net_device *dev = np->dev;
3455 u8 __iomem *base = get_hwbase(dev);
3456 unsigned long flags;
3459 if (!nv_optimized(np)) {
3460 pkts = nv_rx_process(dev, budget);
3461 retcode = nv_alloc_rx(dev);
3463 pkts = nv_rx_process_optimized(dev, budget);
3464 retcode = nv_alloc_rx_optimized(dev);
3468 spin_lock_irqsave(&np->lock, flags);
3469 if (!np->in_shutdown)
3470 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3471 spin_unlock_irqrestore(&np->lock, flags);
3474 if (pkts < budget) {
3475 /* re-enable receive interrupts */
3476 spin_lock_irqsave(&np->lock, flags);
3478 __netif_rx_complete(dev, napi);
3480 np->irqmask |= NVREG_IRQ_RX_ALL;
3481 if (np->msi_flags & NV_MSI_X_ENABLED)
3482 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3484 writel(np->irqmask, base + NvRegIrqMask);
3486 spin_unlock_irqrestore(&np->lock, flags);
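/* NAPI flow as these handlers suggest: the interrupt path masks
 * NVREG_IRQ_RX_ALL and schedules the poll; nv_napi_poll processes at
 * most 'budget' packets with receive interrupts off and re-enables
 * them only when it completes below budget, bounding per-interrupt
 * work. */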
3492 #ifdef CONFIG_FORCEDETH_NAPI
3493 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3495 struct net_device *dev = (struct net_device *) data;
3496 struct fe_priv *np = netdev_priv(dev);
3497 u8 __iomem *base = get_hwbase(dev);
3500 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3501 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3504 netif_rx_schedule(dev, &np->napi);
3505 /* disable receive interrupts on the nic */
3506 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3512 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3514 struct net_device *dev = (struct net_device *) data;
3515 struct fe_priv *np = netdev_priv(dev);
3516 u8 __iomem *base = get_hwbase(dev);
3519 unsigned long flags;
3521 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3524 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3525 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3526 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3527 if (!(events & np->irqmask))
3530 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3531 if (unlikely(nv_alloc_rx_optimized(dev))) {
3532 spin_lock_irqsave(&np->lock, flags);
3533 if (!np->in_shutdown)
3534 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3535 spin_unlock_irqrestore(&np->lock, flags);
3539 if (unlikely(i > max_interrupt_work)) {
3540 spin_lock_irqsave(&np->lock, flags);
3541 /* disable interrupts on the nic */
3542 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3545 if (!np->in_shutdown) {
3546 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3547 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3549 spin_unlock_irqrestore(&np->lock, flags);
3550 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3554 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3556 return IRQ_RETVAL(i);
3560 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3562 struct net_device *dev = (struct net_device *) data;
3563 struct fe_priv *np = netdev_priv(dev);
3564 u8 __iomem *base = get_hwbase(dev);
3567 unsigned long flags;
3569 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3572 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3573 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3574 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3575 if (!(events & np->irqmask))
3578 /* check tx in case we reached max loop limit in tx isr */
3579 spin_lock_irqsave(&np->lock, flags);
3580 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3581 spin_unlock_irqrestore(&np->lock, flags);
3583 if (events & NVREG_IRQ_LINK) {
3584 spin_lock_irqsave(&np->lock, flags);
3586 spin_unlock_irqrestore(&np->lock, flags);
3588 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3589 spin_lock_irqsave(&np->lock, flags);
3591 spin_unlock_irqrestore(&np->lock, flags);
3592 np->link_timeout = jiffies + LINK_TIMEOUT;
3594 if (events & NVREG_IRQ_RECOVER_ERROR) {
3595 spin_lock_irq(&np->lock);
3596 /* disable interrupts on the nic */
3597 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3600 if (!np->in_shutdown) {
3601 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3602 np->recover_error = 1;
3603 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3605 spin_unlock_irq(&np->lock);
3608 if (events & (NVREG_IRQ_UNKNOWN)) {
3609 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3612 if (unlikely(i > max_interrupt_work)) {
3613 spin_lock_irqsave(&np->lock, flags);
3614 /* disable interrupts on the nic */
3615 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3618 if (!np->in_shutdown) {
3619 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3620 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3622 spin_unlock_irqrestore(&np->lock, flags);
3623 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3628 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3630 return IRQ_RETVAL(i);
3633 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3635 struct net_device *dev = (struct net_device *) data;
3636 struct fe_priv *np = netdev_priv(dev);
3637 u8 __iomem *base = get_hwbase(dev);
3640 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3642 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3643 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3644 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3646 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3647 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3650 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3651 if (!(events & NVREG_IRQ_TIMER))
3652 return IRQ_RETVAL(0);
3654 spin_lock(&np->lock);
3656 spin_unlock(&np->lock);
3658 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3660 return IRQ_RETVAL(1);
3663 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3665 u8 __iomem *base = get_hwbase(dev);
3669 /* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
3670 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3671 * the remaining 8 interrupts.
3673 for (i = 0; i < 8; i++) {
3674 if ((irqmask >> i) & 0x1) {
3675 msixmap |= vector << (i << 2);
3678 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3681 for (i = 0; i < 8; i++) {
3682 if ((irqmask >> (i + 8)) & 0x1) {
3683 msixmap |= vector << (i << 2);
3686 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
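/* Worked example (vector number assumed): mapping vector 1 onto
 * irqmask bits 0-2 gives msixmap = (1 << 0) | (1 << 4) | (1 << 8)
 * = 0x111, one 4-bit nibble per interrupt source in MSIXMap0. */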
3689 static int nv_request_irq(struct net_device *dev, int intr_test)
3691 struct fe_priv *np = get_nvpriv(dev);
3692 u8 __iomem *base = get_hwbase(dev);
3695 irqreturn_t (*handler)(int foo, void *data);
3698 handler = nv_nic_irq_test;
3700 if (nv_optimized(np))
3701 handler = nv_nic_irq_optimized;
3703 handler = nv_nic_irq;
3706 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3707 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3708 np->msi_x_entry[i].entry = i;
3710 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3711 np->msi_flags |= NV_MSI_X_ENABLED;
3712 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3713 /* Request irq for rx handling */
3714 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3715 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3716 pci_disable_msix(np->pci_dev);
3717 np->msi_flags &= ~NV_MSI_X_ENABLED;
3720 /* Request irq for tx handling */
3721 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3722 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3723 pci_disable_msix(np->pci_dev);
3724 np->msi_flags &= ~NV_MSI_X_ENABLED;
3727 /* Request irq for link and timer handling */
3728 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3729 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3730 pci_disable_msix(np->pci_dev);
3731 np->msi_flags &= ~NV_MSI_X_ENABLED;
3734 /* map interrupts to their respective vector */
3735 writel(0, base + NvRegMSIXMap0);
3736 writel(0, base + NvRegMSIXMap1);
3737 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3738 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3739 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3741 /* Request irq for all interrupts */
3742 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3743 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3744 pci_disable_msix(np->pci_dev);
3745 np->msi_flags &= ~NV_MSI_X_ENABLED;
3749 /* map interrupts to vector 0 */
3750 writel(0, base + NvRegMSIXMap0);
3751 writel(0, base + NvRegMSIXMap1);
3755 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3756 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3757 np->msi_flags |= NV_MSI_ENABLED;
3758 dev->irq = np->pci_dev->irq;
3759 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3760 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3761 pci_disable_msi(np->pci_dev);
3762 np->msi_flags &= ~NV_MSI_ENABLED;
3763 dev->irq = np->pci_dev->irq;
3767 /* map interrupts to vector 0 */
3768 writel(0, base + NvRegMSIMap0);
3769 writel(0, base + NvRegMSIMap1);
3770 /* enable msi vector 0 */
3771 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3775 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3782 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3784 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
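/* Setup falls back in order: MSI-X (per-cause vectors in throughput
 * mode, otherwise one shared vector), then MSI, then the legacy INTx
 * line; each failed request_irq() undoes its pci_enable_* step before
 * the next mode is tried. */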
3789 static void nv_free_irq(struct net_device *dev)
3791 struct fe_priv *np = get_nvpriv(dev);
3794 if (np->msi_flags & NV_MSI_X_ENABLED) {
3795 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3796 free_irq(np->msi_x_entry[i].vector, dev);
3798 pci_disable_msix(np->pci_dev);
3799 np->msi_flags &= ~NV_MSI_X_ENABLED;
3801 free_irq(np->pci_dev->irq, dev);
3802 if (np->msi_flags & NV_MSI_ENABLED) {
3803 pci_disable_msi(np->pci_dev);
3804 np->msi_flags &= ~NV_MSI_ENABLED;
3809 static void nv_do_nic_poll(unsigned long data)
3811 struct net_device *dev = (struct net_device *) data;
3812 struct fe_priv *np = netdev_priv(dev);
3813 u8 __iomem *base = get_hwbase(dev);
3817 * First disable the irq(s) and then
3818 * re-enable interrupts on the nic; we have to do this before calling
3819 * nv_nic_irq because that may decide to do otherwise.
3822 if (!using_multi_irqs(dev)) {
3823 if (np->msi_flags & NV_MSI_X_ENABLED)
3824 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3826 disable_irq_lockdep(np->pci_dev->irq);
3829 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3830 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3831 mask |= NVREG_IRQ_RX_ALL;
3833 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3834 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3835 mask |= NVREG_IRQ_TX_ALL;
3837 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3838 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3839 mask |= NVREG_IRQ_OTHER;
3842 np->nic_poll_irq = 0;
3844 /* disable_irq() contains a synchronize_irq, thus no irq handler can run now */
3846 if (np->recover_error) {
3847 np->recover_error = 0;
3848 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
3849 if (netif_running(dev)) {
3850 netif_tx_lock_bh(dev);
3851 spin_lock(&np->lock);
3855 /* drain rx queue */
3857 /* reinit driver view of the rx queue */
3859 if (nv_init_ring(dev)) {
3860 if (!np->in_shutdown)
3861 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3863 /* reinit nic view of the rx queue */
3864 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3865 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3866 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3867 base + NvRegRingSizes);
3869 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3872 /* restart rx engine */
3874 spin_unlock(&np->lock);
3875 netif_tx_unlock_bh(dev);
3880 writel(mask, base + NvRegIrqMask);
3883 if (!using_multi_irqs(dev)) {
3884 if (nv_optimized(np))
3885 nv_nic_irq_optimized(0, dev);
3888 if (np->msi_flags & NV_MSI_X_ENABLED)
3889 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3891 enable_irq_lockdep(np->pci_dev->irq);
3893 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3894 nv_nic_irq_rx(0, dev);
3895 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3897 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3898 nv_nic_irq_tx(0, dev);
3899 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3901 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3902 nv_nic_irq_other(0, dev);
3903 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3908 #ifdef CONFIG_NET_POLL_CONTROLLER
3909 static void nv_poll_controller(struct net_device *dev)
3911 nv_do_nic_poll((unsigned long) dev);
3915 static void nv_do_stats_poll(unsigned long data)
3917 struct net_device *dev = (struct net_device *) data;
3918 struct fe_priv *np = netdev_priv(dev);
3920 nv_get_hw_stats(dev);
3922 if (!np->in_shutdown)
3923 mod_timer(&np->stats_poll,
3924 round_jiffies(jiffies + STATS_INTERVAL));
3927 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3929 struct fe_priv *np = netdev_priv(dev);
3930 strcpy(info->driver, DRV_NAME);
3931 strcpy(info->version, FORCEDETH_VERSION);
3932 strcpy(info->bus_info, pci_name(np->pci_dev));
3935 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3937 struct fe_priv *np = netdev_priv(dev);
3938 wolinfo->supported = WAKE_MAGIC;
3940 spin_lock_irq(&np->lock);
3942 wolinfo->wolopts = WAKE_MAGIC;
3943 spin_unlock_irq(&np->lock);
3946 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3948 struct fe_priv *np = netdev_priv(dev);
3949 u8 __iomem *base = get_hwbase(dev);
3952 if (wolinfo->wolopts == 0) {
3954 } else if (wolinfo->wolopts & WAKE_MAGIC) {
3956 flags = NVREG_WAKEUPFLAGS_ENABLE;
3958 if (netif_running(dev)) {
3959 spin_lock_irq(&np->lock);
3960 writel(flags, base + NvRegWakeUpFlags);
3961 spin_unlock_irq(&np->lock);
3966 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3968 struct fe_priv *np = netdev_priv(dev);
3971 spin_lock_irq(&np->lock);
3972 ecmd->port = PORT_MII;
3973 if (!netif_running(dev)) {
3974 /* We do not track link speed / duplex setting if the
3975 * interface is disabled. Force a link check */
3976 if (nv_update_linkspeed(dev)) {
3977 if (!netif_carrier_ok(dev))
3978 netif_carrier_on(dev);
3980 if (netif_carrier_ok(dev))
3981 netif_carrier_off(dev);
3985 if (netif_carrier_ok(dev)) {
3986 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3987 case NVREG_LINKSPEED_10:
3988 ecmd->speed = SPEED_10;
3990 case NVREG_LINKSPEED_100:
3991 ecmd->speed = SPEED_100;
3993 case NVREG_LINKSPEED_1000:
3994 ecmd->speed = SPEED_1000;
3997 ecmd->duplex = DUPLEX_HALF;
3999 ecmd->duplex = DUPLEX_FULL;
4005 ecmd->autoneg = np->autoneg;
4007 ecmd->advertising = ADVERTISED_MII;
4009 ecmd->advertising |= ADVERTISED_Autoneg;
4010 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4011 if (adv & ADVERTISE_10HALF)
4012 ecmd->advertising |= ADVERTISED_10baseT_Half;
4013 if (adv & ADVERTISE_10FULL)
4014 ecmd->advertising |= ADVERTISED_10baseT_Full;
4015 if (adv & ADVERTISE_100HALF)
4016 ecmd->advertising |= ADVERTISED_100baseT_Half;
4017 if (adv & ADVERTISE_100FULL)
4018 ecmd->advertising |= ADVERTISED_100baseT_Full;
4019 if (np->gigabit == PHY_GIGABIT) {
4020 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4021 if (adv & ADVERTISE_1000FULL)
4022 ecmd->advertising |= ADVERTISED_1000baseT_Full;
4025 ecmd->supported = (SUPPORTED_Autoneg |
4026 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4027 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4029 if (np->gigabit == PHY_GIGABIT)
4030 ecmd->supported |= SUPPORTED_1000baseT_Full;
4032 ecmd->phy_address = np->phyaddr;
4033 ecmd->transceiver = XCVR_EXTERNAL;
4035 /* ignore maxtxpkt, maxrxpkt for now */
4036 spin_unlock_irq(&np->lock);
4040 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4042 struct fe_priv *np = netdev_priv(dev);
4044 if (ecmd->port != PORT_MII)
4046 if (ecmd->transceiver != XCVR_EXTERNAL)
4048 if (ecmd->phy_address != np->phyaddr) {
4049 /* TODO: support switching between multiple phys. Should be
4050 * trivial, but not enabled due to lack of test hardware. */
4053 if (ecmd->autoneg == AUTONEG_ENABLE) {
4056 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4057 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4058 if (np->gigabit == PHY_GIGABIT)
4059 mask |= ADVERTISED_1000baseT_Full;
4061 if ((ecmd->advertising & mask) == 0)
4064 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4065 /* Note: autonegotiation disabled, speed 1000 intentionally
4066 * forbidden - no one should need that. */
4068 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
4070 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4076 netif_carrier_off(dev);
4077 if (netif_running(dev)) {
4078 nv_disable_irq(dev);
4079 netif_tx_lock_bh(dev);
4080 spin_lock(&np->lock);
4083 spin_unlock(&np->lock);
4084 netif_tx_unlock_bh(dev);
4087 if (ecmd->autoneg == AUTONEG_ENABLE) {
4092 /* advertise only what has been requested */
4093 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4094 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4095 if (ecmd->advertising & ADVERTISED_10baseT_Half)
4096 adv |= ADVERTISE_10HALF;
4097 if (ecmd->advertising & ADVERTISED_10baseT_Full)
4098 adv |= ADVERTISE_10FULL;
4099 if (ecmd->advertising & ADVERTISED_100baseT_Half)
4100 adv |= ADVERTISE_100HALF;
4101 if (ecmd->advertising & ADVERTISED_100baseT_Full)
4102 adv |= ADVERTISE_100FULL;
4103 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4104 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4105 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4106 adv |= ADVERTISE_PAUSE_ASYM;
4107 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4109 if (np->gigabit == PHY_GIGABIT) {
4110 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4111 adv &= ~ADVERTISE_1000FULL;
4112 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4113 adv |= ADVERTISE_1000FULL;
4114 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4117 if (netif_running(dev))
4118 printk(KERN_INFO "%s: link down.\n", dev->name);
4119 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4120 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4121 bmcr |= BMCR_ANENABLE;
4122 /* reset the phy in order for settings to stick,
4123 * and cause autoneg to start */
4124 if (phy_reset(dev, bmcr)) {
4125 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4129 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4130 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4137 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4138 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4139 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4140 adv |= ADVERTISE_10HALF;
4141 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4142 adv |= ADVERTISE_10FULL;
4143 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4144 adv |= ADVERTISE_100HALF;
4145 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4146 adv |= ADVERTISE_100FULL;
4147 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4148 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4149 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4150 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4152 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4153 adv |= ADVERTISE_PAUSE_ASYM;
4154 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4156 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4157 np->fixed_mode = adv;
4159 if (np->gigabit == PHY_GIGABIT) {
4160 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4161 adv &= ~ADVERTISE_1000FULL;
4162 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4165 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4166 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4167 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4168 bmcr |= BMCR_FULLDPLX;
4169 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4170 bmcr |= BMCR_SPEED100;
4171 if (np->phy_oui == PHY_OUI_MARVELL) {
4172 /* reset the phy in order for forced mode settings to stick */
4173 if (phy_reset(dev, bmcr)) {
4174 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4178 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4179 if (netif_running(dev)) {
4180 /* Wait a bit and then reconfigure the nic. */
4187 if (netif_running(dev)) {
4195 #define FORCEDETH_REGS_VER 1
4197 static int nv_get_regs_len(struct net_device *dev)
4199 struct fe_priv *np = netdev_priv(dev);
4200 return np->register_size;
4203 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4205 struct fe_priv *np = netdev_priv(dev);
4206 u8 __iomem *base = get_hwbase(dev);
4210 regs->version = FORCEDETH_REGS_VER;
4211 spin_lock_irq(&np->lock);
4212 for (i = 0; i < np->register_size/sizeof(u32); i++) /* rbuf holds register_size bytes; <= would overrun it by one u32 */
4213 rbuf[i] = readl(base + i*sizeof(u32));
4214 spin_unlock_irq(&np->lock);
4217 static int nv_nway_reset(struct net_device *dev)
4219 struct fe_priv *np = netdev_priv(dev);
4225 netif_carrier_off(dev);
4226 if (netif_running(dev)) {
4227 nv_disable_irq(dev);
4228 netif_tx_lock_bh(dev);
4229 spin_lock(&np->lock);
4232 spin_unlock(&np->lock);
4233 netif_tx_unlock_bh(dev);
4234 printk(KERN_INFO "%s: link down.\n", dev->name);
4237 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4238 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4239 bmcr |= BMCR_ANENABLE;
4240 /* reset the phy in order for settings to stick */
4241 if (phy_reset(dev, bmcr)) {
4242 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4246 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4247 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4250 if (netif_running(dev)) {
4262 static int nv_set_tso(struct net_device *dev, u32 value)
4264 struct fe_priv *np = netdev_priv(dev);
4266 if (np->driver_data & DEV_HAS_CHECKSUM)
4267 return ethtool_op_set_tso(dev, value);
4272 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4274 struct fe_priv *np = netdev_priv(dev);
4276 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4277 ring->rx_mini_max_pending = 0;
4278 ring->rx_jumbo_max_pending = 0;
4279 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4281 ring->rx_pending = np->rx_ring_size;
4282 ring->rx_mini_pending = 0;
4283 ring->rx_jumbo_pending = 0;
4284 ring->tx_pending = np->tx_ring_size;
4287 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4289 struct fe_priv *np = netdev_priv(dev);
4290 u8 __iomem *base = get_hwbase(dev);
4291 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4292 dma_addr_t ring_addr;
4294 if (ring->rx_pending < RX_RING_MIN ||
4295 ring->tx_pending < TX_RING_MIN ||
4296 ring->rx_mini_pending != 0 ||
4297 ring->rx_jumbo_pending != 0 ||
4298 (np->desc_ver == DESC_VER_1 &&
4299 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4300 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4301 (np->desc_ver != DESC_VER_1 &&
4302 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4303 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4307 /* allocate new rings */
4308 if (!nv_optimized(np)) {
4309 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4310 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4313 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4314 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4317 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4318 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4319 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4320 /* fall back to old rings */
4321 if (!nv_optimized(np)) {
4323 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4324 rxtx_ring, ring_addr);
4327 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4328 rxtx_ring, ring_addr);
4337 if (netif_running(dev)) {
4338 nv_disable_irq(dev);
4339 netif_tx_lock_bh(dev);
4340 spin_lock(&np->lock);
4350 /* set new values */
4351 np->rx_ring_size = ring->rx_pending;
4352 np->tx_ring_size = ring->tx_pending;
4354 if (!nv_optimized(np)) {
4355 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4356 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4358 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4359 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4361 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4362 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4363 np->ring_addr = ring_addr;
4365 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4366 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4368 if (netif_running(dev)) {
4369 /* reinit driver view of the queues */
4371 if (nv_init_ring(dev)) {
4372 if (!np->in_shutdown)
4373 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4376 /* reinit nic view of the queues */
4377 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4378 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
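/* Hardware convention: ring sizes are programmed as (entries - 1),
 * with the rx and tx counts packed into separate bitfields of
 * NvRegRingSizes via the RXSHIFT/TXSHIFT offsets. */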
4379 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4380 base + NvRegRingSizes);
4382 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4385 /* restart engines */
4387 spin_unlock(&np->lock);
4388 netif_tx_unlock_bh(dev);
4396 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4398 struct fe_priv *np = netdev_priv(dev);
4400 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4401 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4402 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4405 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4407 struct fe_priv *np = netdev_priv(dev);
4410 if ((!np->autoneg && np->duplex == 0) ||
4411 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4412 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4416 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4417 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4421 netif_carrier_off(dev);
4422 if (netif_running(dev)) {
4423 nv_disable_irq(dev);
4424 netif_tx_lock_bh(dev);
4425 spin_lock(&np->lock);
4428 spin_unlock(&np->lock);
4429 netif_tx_unlock_bh(dev);
4432 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4433 if (pause->rx_pause)
4434 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4435 if (pause->tx_pause)
4436 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4438 if (np->autoneg && pause->autoneg) {
4439 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4441 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4442 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4443 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4444 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4445 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4446 adv |= ADVERTISE_PAUSE_ASYM;
4447 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4449 if (netif_running(dev))
4450 printk(KERN_INFO "%s: link down.\n", dev->name);
4451 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4452 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4453 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4455 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4456 if (pause->rx_pause)
4457 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4458 if (pause->tx_pause)
4459 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4461 if (!netif_running(dev))
4462 nv_update_linkspeed(dev);
4464 nv_update_pause(dev, np->pause_flags);
4467 if (netif_running(dev)) {
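/* Illustrative sketch, not used by the driver: how the PAUSE_CAP and
 * PAUSE_ASYM bits advertised above resolve after autonegotiation,
 * following the IEEE 802.3 pause resolution rules. The helper name
 * and the int-result convention are ours, added for clarity only. */
static void nv_example_resolve_pause(u16 local_adv, u16 partner_adv,
				     int *rx_pause, int *tx_pause)
{
	*rx_pause = 0;
	*tx_pause = 0;
	if (local_adv & partner_adv & ADVERTISE_PAUSE_CAP) {
		/* both ends advertise symmetric pause */
		*rx_pause = 1;
		*tx_pause = 1;
	} else if ((local_adv & partner_adv & ADVERTISE_PAUSE_ASYM) &&
		   ((local_adv ^ partner_adv) & ADVERTISE_PAUSE_CAP)) {
		if (local_adv & ADVERTISE_PAUSE_CAP)
			*rx_pause = 1;	/* we honour the partner's pause frames */
		else
			*tx_pause = 1;	/* we may send pause frames to the partner */
	}
}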
4474 static u32 nv_get_rx_csum(struct net_device *dev)
4476 struct fe_priv *np = netdev_priv(dev);
4477 return np->rx_csum != 0;
4480 static int nv_set_rx_csum(struct net_device *dev, u32 data)
4482 struct fe_priv *np = netdev_priv(dev);
4483 u8 __iomem *base = get_hwbase(dev);
4486 if (np->driver_data & DEV_HAS_CHECKSUM) {
4489 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4492 /* vlan is dependent on rx checksum offload */
4493 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4494 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4496 if (netif_running(dev)) {
4497 spin_lock_irq(&np->lock);
4498 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4499 spin_unlock_irq(&np->lock);
4508 static int nv_set_tx_csum(struct net_device *dev, u32 data)
4510 struct fe_priv *np = netdev_priv(dev);
4512 if (np->driver_data & DEV_HAS_CHECKSUM)
4513 return ethtool_op_set_tx_hw_csum(dev, data);
4518 static int nv_set_sg(struct net_device *dev, u32 data)
4520 struct fe_priv *np = netdev_priv(dev);
4522 if (np->driver_data & DEV_HAS_CHECKSUM)
4523 return ethtool_op_set_sg(dev, data);
4528 static int nv_get_sset_count(struct net_device *dev, int sset)
4530 struct fe_priv *np = netdev_priv(dev);
4534 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4535 return NV_TEST_COUNT_EXTENDED;
4537 return NV_TEST_COUNT_BASE;
4539 if (np->driver_data & DEV_HAS_STATISTICS_V1)
4540 return NV_DEV_STATISTICS_V1_COUNT;
4541 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4542 return NV_DEV_STATISTICS_V2_COUNT;
4550 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4552 struct fe_priv *np = netdev_priv(dev);
4555 nv_do_stats_poll((unsigned long)dev);
4557 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4560 static int nv_link_test(struct net_device *dev)
4562 struct fe_priv *np = netdev_priv(dev);
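/* The link status bit in BMSR latches link-down events; read the
 * register twice so the second read reflects the current state. */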
4565 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4566 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4568 /* check phy link status */
4569 if (!(mii_status & BMSR_LSTATUS))
4575 static int nv_register_test(struct net_device *dev)
4577 u8 __iomem *base = get_hwbase(dev);
4579 u32 orig_read, new_read;
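/* Walk the nv_registers_test[] table: XOR each register with its
 * writable-bit mask, write the toggled value back, verify that the
 * toggled bits read back as written, then restore the original. */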
4582 orig_read = readl(base + nv_registers_test[i].reg);
4584 /* xor with mask to toggle bits */
4585 orig_read ^= nv_registers_test[i].mask;
4587 writel(orig_read, base + nv_registers_test[i].reg);
4589 new_read = readl(base + nv_registers_test[i].reg);
4591 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4594 /* restore original value */
4595 orig_read ^= nv_registers_test[i].mask;
4596 writel(orig_read, base + nv_registers_test[i].reg);
4598 } while (nv_registers_test[++i].reg != 0);
4603 static int nv_interrupt_test(struct net_device *dev)
4605 struct fe_priv *np = netdev_priv(dev);
4606 u8 __iomem *base = get_hwbase(dev);
4609 u32 save_msi_flags, save_poll_interval = 0;
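/* Strategy: temporarily drop to a single interrupt vector, enable
 * only the timer interrupt, and wait for the ISR to set
 * np->intr_test as proof that interrupt delivery works. */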
4611 if (netif_running(dev)) {
4612 /* free current irq */
4614 save_poll_interval = readl(base+NvRegPollingInterval);
4617 /* flag to test interrupt handler */
4620 /* setup test irq */
4621 save_msi_flags = np->msi_flags;
4622 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4623 np->msi_flags |= 0x001; /* setup 1 vector */
4624 if (nv_request_irq(dev, 1))
4627 /* setup timer interrupt */
4628 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4629 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4631 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4633 /* wait for at least one interrupt */
4636 spin_lock_irq(&np->lock);
4638 /* flag should be set within ISR */
4639 testcnt = np->intr_test;
4643 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4644 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4645 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4647 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4649 spin_unlock_irq(&np->lock);
4653 np->msi_flags = save_msi_flags;
4655 if (netif_running(dev)) {
4656 writel(save_poll_interval, base + NvRegPollingInterval);
4657 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4658 /* restore original irq */
4659 if (nv_request_irq(dev, 0))
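/* Offline loopback test: force the MAC into loopback via NvRegMisc1
 * and NvRegPacketFilterFlags, transmit one ETH_DATA_LEN frame filled
 * with a counting byte pattern, and verify that the frame comes back
 * intact on the rx ring. */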
4666 static int nv_loopback_test(struct net_device *dev)
4668 struct fe_priv *np = netdev_priv(dev);
4669 u8 __iomem *base = get_hwbase(dev);
4670 struct sk_buff *tx_skb, *rx_skb;
4671 dma_addr_t test_dma_addr;
4672 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4674 int len, i, pkt_len;
4676 u32 filter_flags = 0;
4677 u32 misc1_flags = 0;
4680 if (netif_running(dev)) {
4681 nv_disable_irq(dev);
4682 filter_flags = readl(base + NvRegPacketFilterFlags);
4683 misc1_flags = readl(base + NvRegMisc1);
4688 /* reinit driver view of the rx queue */
4692 /* setup hardware for loopback */
4693 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4694 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4696 /* reinit nic view of the rx queue */
4697 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4698 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4699 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4700 base + NvRegRingSizes);
4703 /* restart rx engine */
4706 /* setup packet for tx */
4707 pkt_len = ETH_DATA_LEN;
4708 tx_skb = dev_alloc_skb(pkt_len);
4710 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4711 " of %s\n", dev->name);
4715 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4716 skb_tailroom(tx_skb),
4717 PCI_DMA_TODEVICE); /* tx buffer: the device only reads it */
4718 pkt_data = skb_put(tx_skb, pkt_len);
4719 for (i = 0; i < pkt_len; i++)
4720 pkt_data[i] = (u8)(i & 0xff);
4722 if (!nv_optimized(np)) {
4723 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4724 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4726 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4727 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4728 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4730 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4731 pci_push(get_hwbase(dev));
4735 /* check for rx of the packet */
4736 if (!nv_optimized(np)) {
4737 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4738 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4741 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4742 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4745 if (flags & NV_RX_AVAIL) {
4747 } else if (np->desc_ver == DESC_VER_1) {
4748 if (flags & NV_RX_ERROR)
4751 if (flags & NV_RX2_ERROR) {
4757 if (len != pkt_len) {
4759 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4760 dev->name, len, pkt_len);
4762 rx_skb = np->rx_skb[0].skb;
4763 for (i = 0; i < pkt_len; i++) {
4764 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4766 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4773 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4776 pci_unmap_single(np->pci_dev, test_dma_addr,
4777 (skb_end_pointer(tx_skb) - tx_skb->data),
4779 dev_kfree_skb_any(tx_skb);
4784 /* drain rx queue */
4787 if (netif_running(dev)) {
4788 writel(misc1_flags, base + NvRegMisc1);
4789 writel(filter_flags, base + NvRegPacketFilterFlags);
4796 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4798 struct fe_priv *np = netdev_priv(dev);
4799 u8 __iomem *base = get_hwbase(dev);
4801 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
4803 if (!nv_link_test(dev)) {
4804 test->flags |= ETH_TEST_FL_FAILED;
4808 if (test->flags & ETH_TEST_FL_OFFLINE) {
4809 if (netif_running(dev)) {
4810 netif_stop_queue(dev);
4811 #ifdef CONFIG_FORCEDETH_NAPI
4812 napi_disable(&np->napi);
4814 netif_tx_lock_bh(dev);
4815 spin_lock_irq(&np->lock);
4816 nv_disable_hw_interrupts(dev, np->irqmask);
4817 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
4818 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4820 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4825 /* drain rx queue */
4827 spin_unlock_irq(&np->lock);
4828 netif_tx_unlock_bh(dev);
4831 if (!nv_register_test(dev)) {
4832 test->flags |= ETH_TEST_FL_FAILED;
4836 result = nv_interrupt_test(dev);
4838 test->flags |= ETH_TEST_FL_FAILED;
4846 if (!nv_loopback_test(dev)) {
4847 test->flags |= ETH_TEST_FL_FAILED;
4851 if (netif_running(dev)) {
4852 /* reinit driver view of the rx queue */
4854 if (nv_init_ring(dev)) {
4855 if (!np->in_shutdown)
4856 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4858 /* reinit nic view of the rx queue */
4859 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4860 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4861 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4862 base + NvRegRingSizes);
4864 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4866 /* restart rx engine */
4868 netif_start_queue(dev);
4869 #ifdef CONFIG_FORCEDETH_NAPI
4870 napi_enable(&np->napi);
4872 nv_enable_hw_interrupts(dev, np->irqmask);
4877 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4879 switch (stringset) {
4881 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
4884 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
4889 static const struct ethtool_ops ops = {
4890 .get_drvinfo = nv_get_drvinfo,
4891 .get_link = ethtool_op_get_link,
4892 .get_wol = nv_get_wol,
4893 .set_wol = nv_set_wol,
4894 .get_settings = nv_get_settings,
4895 .set_settings = nv_set_settings,
4896 .get_regs_len = nv_get_regs_len,
4897 .get_regs = nv_get_regs,
4898 .nway_reset = nv_nway_reset,
4899 .set_tso = nv_set_tso,
4900 .get_ringparam = nv_get_ringparam,
4901 .set_ringparam = nv_set_ringparam,
4902 .get_pauseparam = nv_get_pauseparam,
4903 .set_pauseparam = nv_set_pauseparam,
4904 .get_rx_csum = nv_get_rx_csum,
4905 .set_rx_csum = nv_set_rx_csum,
4906 .set_tx_csum = nv_set_tx_csum,
4907 .set_sg = nv_set_sg,
4908 .get_strings = nv_get_strings,
4909 .get_ethtool_stats = nv_get_ethtool_stats,
4910 .get_sset_count = nv_get_sset_count,
4911 .self_test = nv_self_test,
4914 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
4916 struct fe_priv *np = get_nvpriv(dev);
4918 spin_lock_irq(&np->lock);
4920 /* save vlan group */
4924 /* enable vlan on MAC */
4925 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
4927 /* disable vlan on MAC */
4928 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4929 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4932 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4934 spin_unlock_irq(&np->lock);
4937 /* The mgmt unit and driver use a semaphore to access the phy during init */
4938 static int nv_mgmt_acquire_sema(struct net_device *dev)
4940 u8 __iomem *base = get_hwbase(dev);
4942 u32 tx_ctrl, mgmt_sema;
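/* Poll until the management unit releases its semaphore, then try to
 * latch the host semaphore; reading both fields back confirms that
 * the acquisition actually stuck. */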
4944 for (i = 0; i < 10; i++) {
4945 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
4946 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
4951 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
4954 for (i = 0; i < 2; i++) {
4955 tx_ctrl = readl(base + NvRegTransmitterControl);
4956 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
4957 writel(tx_ctrl, base + NvRegTransmitterControl);
4959 /* verify that semaphore was acquired */
4960 tx_ctrl = readl(base + NvRegTransmitterControl);
4961 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
4962 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
4971 static int nv_open(struct net_device *dev)
4973 struct fe_priv *np = netdev_priv(dev);
4974 u8 __iomem *base = get_hwbase(dev);
4979 dprintk(KERN_DEBUG "nv_open: begin\n");
4981 /* erase previous misconfiguration */
4982 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4984 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
4985 writel(0, base + NvRegMulticastAddrB);
4986 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
4987 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
4988 writel(0, base + NvRegPacketFilterFlags);
4990 writel(0, base + NvRegTransmitterControl);
4991 writel(0, base + NvRegReceiverControl);
4993 writel(0, base + NvRegAdapterControl);
4995 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
4996 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
4998 /* initialize descriptor rings */
5000 oom = nv_init_ring(dev);
5002 writel(0, base + NvRegLinkSpeed);
5003 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5005 writel(0, base + NvRegUnknownSetupReg6);
5007 np->in_shutdown = 0;
5010 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5011 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5012 base + NvRegRingSizes);
5014 writel(np->linkspeed, base + NvRegLinkSpeed);
5015 if (np->desc_ver == DESC_VER_1)
5016 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5018 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5019 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5020 writel(np->vlanctl_bits, base + NvRegVlanControl);
5022 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5023 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5024 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5025 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5027 writel(0, base + NvRegMIIMask);
5028 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5029 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5031 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5032 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5033 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5034 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5036 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
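/* Seed the low bits of the slot time register with randomness so
 * that NICs sharing a collision domain are unlikely to back off in
 * lockstep after a collision. */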
5038 get_random_bytes(&low, sizeof(low));
5039 low &= NVREG_SLOTTIME_MASK;
5040 if (np->desc_ver == DESC_VER_1) {
5041 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5043 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5044 /* setup legacy backoff */
5045 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5047 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5048 nv_gear_backoff_reseed(dev);
5051 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5052 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5053 if (poll_interval == -1) {
5054 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5055 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5057 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5060 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5061 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5062 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5063 base + NvRegAdapterControl);
5064 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5065 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5067 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5069 i = readl(base + NvRegPowerState);
5070 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5071 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5075 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5077 nv_disable_hw_interrupts(dev, np->irqmask);
5079 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5080 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5083 if (nv_request_irq(dev, 0)) {
5087 /* ask for interrupts */
5088 nv_enable_hw_interrupts(dev, np->irqmask);
5090 spin_lock_irq(&np->lock);
5091 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5092 writel(0, base + NvRegMulticastAddrB);
5093 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5094 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5095 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5096 /* One manual link speed update: Interrupts are enabled, future link
5097 * speed changes cause interrupts and are handled by nv_link_irq().
5101 miistat = readl(base + NvRegMIIStatus);
5102 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5103 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5105 /* set linkspeed to invalid value, thus force nv_update_linkspeed
5108 ret = nv_update_linkspeed(dev);
5110 netif_start_queue(dev);
5111 #ifdef CONFIG_FORCEDETH_NAPI
5112 napi_enable(&np->napi);
5116 netif_carrier_on(dev);
5118 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
5119 netif_carrier_off(dev);
5122 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5124 /* start statistics timer */
5125 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
5126 mod_timer(&np->stats_poll,
5127 round_jiffies(jiffies + STATS_INTERVAL));
5129 spin_unlock_irq(&np->lock);
5137 static int nv_close(struct net_device *dev)
5139 struct fe_priv *np = netdev_priv(dev);
5142 spin_lock_irq(&np->lock);
5143 np->in_shutdown = 1;
5144 spin_unlock_irq(&np->lock);
5145 #ifdef CONFIG_FORCEDETH_NAPI
5146 napi_disable(&np->napi);
5148 synchronize_irq(np->pci_dev->irq);
5150 del_timer_sync(&np->oom_kick);
5151 del_timer_sync(&np->nic_poll);
5152 del_timer_sync(&np->stats_poll);
5154 netif_stop_queue(dev);
5155 spin_lock_irq(&np->lock);
5159 /* disable interrupts on the nic or we will lock up */
5160 base = get_hwbase(dev);
5161 nv_disable_hw_interrupts(dev, np->irqmask);
5163 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5165 spin_unlock_irq(&np->lock);
5171 if (np->wolenabled) {
5172 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5176 /* FIXME: power down nic */
5181 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5183 struct net_device *dev;
5188 u32 powerstate, txreg;
5189 u32 phystate_orig = 0, phystate;
5190 int phyinitialized = 0;
5191 DECLARE_MAC_BUF(mac);
5192 static int printed_version;
5194 if (!printed_version++)
5195 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
5196 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
5198 dev = alloc_etherdev(sizeof(struct fe_priv));
5203 np = netdev_priv(dev);
5205 np->pci_dev = pci_dev;
5206 spin_lock_init(&np->lock);
5207 SET_NETDEV_DEV(dev, &pci_dev->dev);
5209 init_timer(&np->oom_kick);
5210 np->oom_kick.data = (unsigned long) dev;
5211 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
5212 init_timer(&np->nic_poll);
5213 np->nic_poll.data = (unsigned long) dev;
5214 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
5215 init_timer(&np->stats_poll);
5216 np->stats_poll.data = (unsigned long) dev;
5217 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
5219 err = pci_enable_device(pci_dev);
5223 pci_set_master(pci_dev);
5225 err = pci_request_regions(pci_dev, DRV_NAME);
5229 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
5230 np->register_size = NV_PCI_REGSZ_VER3;
5231 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5232 np->register_size = NV_PCI_REGSZ_VER2;
5234 np->register_size = NV_PCI_REGSZ_VER1;
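/* Scan the PCI BARs for a memory-mapped resource large enough to
 * hold the register window selected above. */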
5238 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5239 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5240 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5241 pci_resource_len(pci_dev, i),
5242 pci_resource_flags(pci_dev, i));
5243 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5244 pci_resource_len(pci_dev, i) >= np->register_size) {
5245 addr = pci_resource_start(pci_dev, i);
5249 if (i == DEVICE_COUNT_RESOURCE) {
5250 dev_printk(KERN_INFO, &pci_dev->dev,
5251 "Couldn't find register window\n");
5255 /* copy of driver data */
5256 np->driver_data = id->driver_data;
5258 /* handle different descriptor versions */
5259 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5260 /* packet format 3: supports 40-bit addressing */
5261 np->desc_ver = DESC_VER_3;
5262 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5264 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
5265 dev_printk(KERN_INFO, &pci_dev->dev,
5266 "64-bit DMA failed, using 32-bit addressing\n");
5268 dev->features |= NETIF_F_HIGHDMA;
5269 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5270 dev_printk(KERN_INFO, &pci_dev->dev,
5271 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5274 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5275 /* packet format 2: supports jumbo frames */
5276 np->desc_ver = DESC_VER_2;
5277 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5279 /* original packet format */
5280 np->desc_ver = DESC_VER_1;
5281 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5284 np->pkt_limit = NV_PKTLIMIT_1;
5285 if (id->driver_data & DEV_HAS_LARGEDESC)
5286 np->pkt_limit = NV_PKTLIMIT_2;
5288 if (id->driver_data & DEV_HAS_CHECKSUM) {
5290 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5291 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5292 dev->features |= NETIF_F_TSO;
5295 np->vlanctl_bits = 0;
5296 if (id->driver_data & DEV_HAS_VLAN) {
5297 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5298 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5299 dev->vlan_rx_register = nv_vlan_rx_register;
5303 if ((id->driver_data & DEV_HAS_MSI) && msi) {
5304 np->msi_flags |= NV_MSI_CAPABLE;
5306 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5307 np->msi_flags |= NV_MSI_X_CAPABLE;
5310 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5311 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5312 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5313 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5314 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5319 np->base = ioremap(addr, np->register_size);
5322 dev->base_addr = (unsigned long)np->base;
5324 dev->irq = pci_dev->irq;
5326 np->rx_ring_size = RX_RING_DEFAULT;
5327 np->tx_ring_size = TX_RING_DEFAULT;
5329 if (!nv_optimized(np)) {
5330 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5331 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5333 if (!np->rx_ring.orig)
5335 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5337 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5338 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5340 if (!np->rx_ring.ex)
5342 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5344 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5345 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5346 if (!np->rx_skb || !np->tx_skb)
5349 dev->open = nv_open;
5350 dev->stop = nv_close;
5352 if (!nv_optimized(np))
5353 dev->hard_start_xmit = nv_start_xmit;
5355 dev->hard_start_xmit = nv_start_xmit_optimized;
5356 dev->get_stats = nv_get_stats;
5357 dev->change_mtu = nv_change_mtu;
5358 dev->set_mac_address = nv_set_mac_address;
5359 dev->set_multicast_list = nv_set_multicast;
5360 #ifdef CONFIG_NET_POLL_CONTROLLER
5361 dev->poll_controller = nv_poll_controller;
5363 #ifdef CONFIG_FORCEDETH_NAPI
5364 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5366 SET_ETHTOOL_OPS(dev, &ops);
5367 dev->tx_timeout = nv_tx_timeout;
5368 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5370 pci_set_drvdata(pci_dev, dev);
5372 /* read the mac address */
5373 base = get_hwbase(dev);
5374 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5375 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5377 /* check the workaround bit for correct mac address order */
5378 txreg = readl(base + NvRegTransmitPoll);
5379 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5380 /* mac address is already in correct order */
5381 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5382 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5383 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5384 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5385 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5386 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5387 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5388 /* mac address is already in correct order */
5389 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5390 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5391 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5392 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5393 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5394 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5396 * Set orig mac address back to the reversed version.
5397 * This flag will be cleared during low power transition.
5398 * Therefore, we should always put back the reversed address.
5400 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5401 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5402 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5404 /* need to reverse mac address to correct order */
5405 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5406 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5407 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5408 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5409 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5410 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5411 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
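/* Worked example of the reversed layout: for the MAC address
 * 00:11:22:33:44:55 the legacy registers hold
 * NvRegMacAddrA = 0x22334455 and NvRegMacAddrB = 0x00000011; the
 * byte extraction above recovers dev_addr[] = 00 11 22 33 44 55. */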
5413 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5415 if (!is_valid_ether_addr(dev->perm_addr)) {
5417 * Bad MAC address. At least one BIOS sets the MAC address
5418 * to 01:23:45:67:89:ab
5420 dev_printk(KERN_ERR, &pci_dev->dev,
5421 "Invalid Mac address detected: %s\n",
5422 print_mac(mac, dev->dev_addr));
5423 dev_printk(KERN_ERR, &pci_dev->dev,
5424 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5425 dev->dev_addr[0] = 0x00;
5426 dev->dev_addr[1] = 0x00;
5427 dev->dev_addr[2] = 0x6c;
5428 get_random_bytes(&dev->dev_addr[3], 3);
5431 dprintk(KERN_DEBUG "%s: MAC Address %s\n",
5432 pci_name(pci_dev), print_mac(mac, dev->dev_addr));
5434 /* set mac address */
5435 nv_copy_mac_to_hw(dev);
5438 writel(0, base + NvRegWakeUpFlags);
5441 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5443 /* take phy and nic out of low power mode */
5444 powerstate = readl(base + NvRegPowerState2);
5445 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5446 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5447 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5448 pci_dev->revision >= 0xA3)
5449 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5450 writel(powerstate, base + NvRegPowerState2);
5453 if (np->desc_ver == DESC_VER_1) {
5454 np->tx_flags = NV_TX_VALID;
5456 np->tx_flags = NV_TX2_VALID;
5458 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
5459 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5460 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5461 np->msi_flags |= 0x0003;
5463 np->irqmask = NVREG_IRQMASK_CPU;
5464 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5465 np->msi_flags |= 0x0001;
5468 if (id->driver_data & DEV_NEED_TIMERIRQ)
5469 np->irqmask |= NVREG_IRQ_TIMER;
5470 if (id->driver_data & DEV_NEED_LINKTIMER) {
5471 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5472 np->need_linktimer = 1;
5473 np->link_timeout = jiffies + LINK_TIMEOUT;
5475 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5476 np->need_linktimer = 0;
5479 /* Limit the number of tx's outstanding for hw bug */
5480 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5482 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
5483 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
5484 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
5485 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
5486 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
5487 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
5488 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
5489 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
5490 pci_dev->revision >= 0xA2)
5494 /* clear phy state and temporarily halt phy interrupts */
5495 writel(0, base + NvRegMIIMask);
5496 phystate = readl(base + NvRegAdapterControl);
5497 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5499 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5500 writel(phystate, base + NvRegAdapterControl);
5502 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5504 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5505 /* management unit running on the mac? */
5506 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
5507 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
5508 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
5509 if (nv_mgmt_acquire_sema(dev)) {
5510 /* management unit setup the phy already? */
5511 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5512 NVREG_XMITCTL_SYNC_PHY_INIT) {
5513 /* phy is inited by mgmt unit */
5515 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
5517 /* we need to init the phy */
5523 /* find a suitable phy */
5524 for (i = 1; i <= 32; i++) {
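/* probe PHY addresses 1..31 first, then 0 (i == 32 wraps to 0) */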
5526 int phyaddr = i & 0x1F;
5528 spin_lock_irq(&np->lock);
5529 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5530 spin_unlock_irq(&np->lock);
5531 if (id1 < 0 || id1 == 0xffff)
5533 spin_lock_irq(&np->lock);
5534 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5535 spin_unlock_irq(&np->lock);
5536 if (id2 < 0 || id2 == 0xffff)
5539 np->phy_model = id2 & PHYID2_MODEL_MASK;
5540 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5541 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5542 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5543 pci_name(pci_dev), id1, id2, phyaddr);
5544 np->phyaddr = phyaddr;
5545 np->phy_oui = id1 | id2;
5549 dev_printk(KERN_INFO, &pci_dev->dev,
5550 "open: Could not find a valid PHY.\n");
5554 if (!phyinitialized) {
5558 /* see if it is a gigabit phy */
5559 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5560 if (mii_status & PHY_GIGABIT) {
5561 np->gigabit = PHY_GIGABIT;
5565 /* set default link speed settings */
5566 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5570 err = register_netdev(dev);
5572 dev_printk(KERN_INFO, &pci_dev->dev,
5573 "unable to register netdev: %d\n", err);
5577 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
5578 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
5589 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5590 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5591 dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
5593 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5595 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5596 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5597 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5598 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5599 np->need_linktimer ? "lnktim " : "",
5600 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5601 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5608 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5609 pci_set_drvdata(pci_dev, NULL);
5613 iounmap(get_hwbase(dev));
5615 pci_release_regions(pci_dev);
5617 pci_disable_device(pci_dev);
5624 static void __devexit nv_remove(struct pci_dev *pci_dev)
5626 struct net_device *dev = pci_get_drvdata(pci_dev);
5627 struct fe_priv *np = netdev_priv(dev);
5628 u8 __iomem *base = get_hwbase(dev);
5630 unregister_netdev(dev);
5632 /* special op: write back the misordered MAC address - otherwise
5633 * the next nv_probe would see a wrong address.
5635 writel(np->orig_mac[0], base + NvRegMacAddrA);
5636 writel(np->orig_mac[1], base + NvRegMacAddrB);
5637 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5638 base + NvRegTransmitPoll);
5640 /* free all structures */
5642 iounmap(get_hwbase(dev));
5643 pci_release_regions(pci_dev);
5644 pci_disable_device(pci_dev);
5646 pci_set_drvdata(pci_dev, NULL);
5650 static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5652 struct net_device *dev = pci_get_drvdata(pdev);
5653 struct fe_priv *np = netdev_priv(dev);
5655 if (!netif_running(dev))
5658 netif_device_detach(dev);
5663 pci_save_state(pdev);
5664 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5665 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5670 static int nv_resume(struct pci_dev *pdev)
5672 struct net_device *dev = pci_get_drvdata(pdev);
5673 u8 __iomem *base = get_hwbase(dev);
5677 if (!netif_running(dev))
5680 netif_device_attach(dev);
5682 pci_set_power_state(pdev, PCI_D0);
5683 pci_restore_state(pdev);
5684 pci_enable_wake(pdev, PCI_D0, 0);
5686 /* restore mac address reverse flag */
5687 txreg = readl(base + NvRegTransmitPoll);
5688 txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
5689 writel(txreg, base + NvRegTransmitPoll);
5696 #define nv_suspend NULL
5697 #define nv_resume NULL
5698 #endif /* CONFIG_PM */
5700 static struct pci_device_id pci_tbl[] = {
5701 { /* nForce Ethernet Controller */
5702 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
5703 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5705 { /* nForce2 Ethernet Controller */
5706 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
5707 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5709 { /* nForce3 Ethernet Controller */
5710 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
5711 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5713 { /* nForce3 Ethernet Controller */
5714 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
5715 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5717 { /* nForce3 Ethernet Controller */
5718 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
5719 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5721 { /* nForce3 Ethernet Controller */
5722 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
5723 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5725 { /* nForce3 Ethernet Controller */
5726 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
5727 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5729 { /* CK804 Ethernet Controller */
5730 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
5731 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5733 { /* CK804 Ethernet Controller */
5734 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
5735 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5737 { /* MCP04 Ethernet Controller */
5738 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
5739 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5741 { /* MCP04 Ethernet Controller */
5742 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
5743 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5745 { /* MCP51 Ethernet Controller */
5746 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
5747 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
5749 { /* MCP51 Ethernet Controller */
5750 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
5751 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
5753 { /* MCP55 Ethernet Controller */
5754 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
5755 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
5757 { /* MCP55 Ethernet Controller */
5758 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
5759 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
5761 { /* MCP61 Ethernet Controller */
5762 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
5763 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5765 { /* MCP61 Ethernet Controller */
5766 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
5767 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5769 { /* MCP61 Ethernet Controller */
5770 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
5771 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5773 { /* MCP61 Ethernet Controller */
5774 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
5775 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5777 { /* MCP65 Ethernet Controller */
5778 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
5779 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5781 { /* MCP65 Ethernet Controller */
5782 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
5783 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5785 { /* MCP65 Ethernet Controller */
5786 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
5787 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5789 { /* MCP65 Ethernet Controller */
5790 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
5791 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5793 { /* MCP67 Ethernet Controller */
5794 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
5795 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5797 { /* MCP67 Ethernet Controller */
5798 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
5799 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5801 { /* MCP67 Ethernet Controller */
5802 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
5803 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5805 { /* MCP67 Ethernet Controller */
5806 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
5807 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
5809 { /* MCP73 Ethernet Controller */
5810 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
5811 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5813 { /* MCP73 Ethernet Controller */
5814 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
5815 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5817 { /* MCP73 Ethernet Controller */
5818 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
5819 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5821 { /* MCP73 Ethernet Controller */
5822 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
5823 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
5825 { /* MCP77 Ethernet Controller */
5826 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
5827 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5829 { /* MCP77 Ethernet Controller */
5830 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
5831 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5833 { /* MCP77 Ethernet Controller */
5834 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
5835 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5837 { /* MCP77 Ethernet Controller */
5838 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
5839 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5841 { /* MCP79 Ethernet Controller */
5842 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
5843 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5845 { /* MCP79 Ethernet Controller */
5846 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
5847 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5849 { /* MCP79 Ethernet Controller */
5850 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
5851 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5853 { /* MCP79 Ethernet Controller */
5854 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
5855 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
5860 static struct pci_driver driver = {
5862 .id_table = pci_tbl,
5864 .remove = __devexit_p(nv_remove),
5865 .suspend = nv_suspend,
5866 .resume = nv_resume,
5869 static int __init init_nic(void)
5871 return pci_register_driver(&driver);
5874 static void __exit exit_nic(void)
5876 pci_unregister_driver(&driver);
5879 module_param(max_interrupt_work, int, 0);
5880 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
5881 module_param(optimization_mode, int, 0);
5882 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
5883 module_param(poll_interval, int, 0);
5884 MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
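/* Illustrative sketch, not part of the driver: convert a desired poll
 * period in microseconds to the register value described above, i.e.
 * value = (usecs * 100) / 2^10, clamped to the 16-bit register. A
 * value of 970, for instance, corresponds to roughly
 * 970 * 1024 / 100 ~= 9933 us, i.e. about 100 timer interrupts per
 * second. The helper name is ours. */
static inline unsigned int nv_example_usecs_to_poll_interval(unsigned int usecs)
{
	unsigned int val = (usecs * 100) >> 10;

	return val > 0xFFFF ? 0xFFFF : val;
}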
5885 module_param(msi, int, 0);
5886 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
5887 module_param(msix, int, 0);
5888 MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
5889 module_param(dma_64bit, int, 0);
5890 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
5892 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
5893 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
5894 MODULE_LICENSE("GPL");
5896 MODULE_DEVICE_TABLE(pci, pci_tbl);
5898 module_init(init_nic);
5899 module_exit(exit_nic);