1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
30 Linux kernel version history:
33 - Jeff Garzik: softnet 'n stuff
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
63 - Manfred Spraul: added reset into tx_timeout
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
85 - Replace some MII-related magic numbers with constants
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
105 - Use new MII lib helper generic_mii_ioctl
107 LK1.1.16 (Roger Luethi)
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
128 LK1.2.0-2.6 (Roger Luethi)
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good
132 - Craig Brind: Zero padded aligned buffers for short packets.
#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.2.0-2.6"
#define DRV_RELDATE	"June-10-2004"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;	/* Max events handled per ISR pass. */

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;	/* 0 (default): never copy received frames. */

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	16


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
180 #include <linux/module.h>
181 #include <linux/moduleparam.h>
182 #include <linux/kernel.h>
183 #include <linux/string.h>
184 #include <linux/timer.h>
185 #include <linux/errno.h>
186 #include <linux/ioport.h>
187 #include <linux/slab.h>
188 #include <linux/interrupt.h>
189 #include <linux/pci.h>
190 #include <linux/dma-mapping.h>
191 #include <linux/netdevice.h>
192 #include <linux/etherdevice.h>
193 #include <linux/skbuff.h>
194 #include <linux/init.h>
195 #include <linux/delay.h>
196 #include <linux/mii.h>
197 #include <linux/ethtool.h>
198 #include <linux/crc32.h>
199 #include <linux/bitops.h>
200 #include <asm/processor.h> /* Processor type for cache alignment. */
203 #include <asm/uaccess.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
/* NOTE(review): the '#define USE_MMIO' and matching '#endif' lines appear
   to have been lost in extraction (USE_MMIO is tested further below). */

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

/* Runtime-tunable module parameters (see the static defaults above). */
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
230 I. Board Compatibility
232 This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
235 II. Board-specific settings
237 Boards with this chip are functional only in a bus-master PCI slot.
239 Many operational settings are loaded from the EEPROM to the Config word at
240 offset 0x78. For most of these settings, this driver assumes that they are
242 If this driver is compiled to use PCI memory space operations the EEPROM
243 must be configured to enable memory ops.
245 III. Driver operation
249 This driver uses two statically allocated fixed-size descriptor lists
250 formed into rings by a branch from the final descriptor to the beginning of
251 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
253 IIIb/c. Transmit/Receive Structure
255 This driver attempts to use a zero-copy receive and transmit scheme.
257 Alas, all data buffers are required to start on a 32 bit boundary, so
258 the driver must often copy transmit packets into bounce buffers.
260 The driver allocates full frame size skbuffs for the Rx ring buffers at
261 open() time and passes the skb->data field to the chip as receive data
262 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
263 a fresh skbuff is allocated and the frame is copied to the new skbuff.
264 When the incoming frame is larger, the skbuff is passed directly up the
265 protocol stack. Buffers consumed this way are replaced by newly allocated
266 skbuffs in the last phase of rhine_rx().
268 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
269 using a full-sized skbuff for small frames vs. the copying costs of larger
270 frames. New boards are typically used in generously configured machines
271 and the underfilled buffers have negligible impact compared to the benefit of
272 a single allocation size, so the default value of zero results in never
273 copying packets. When copying is done, the cost is usually mitigated by using
274 a combined copy/checksum routine. Copying also preloads the cache, which is
275 most useful with small frames.
277 Since the VIA chips are only able to transfer data to buffers on 32 bit
278 boundaries, the IP header at offset 14 in an ethernet frame isn't
279 longword aligned for further processing. Copying these unaligned buffers
280 has the beneficial effect of 16-byte aligning the IP header.
282 IIId. Synchronization
284 The driver runs as two independent, single-threaded flows of control. One
285 is the send-packet routine, which enforces single-threaded use by the
286 dev->priv->lock spinlock. The other thread is the interrupt handler, which
287 is single threaded by the hardware and interrupt handling software.
289 The send packet thread has partial control over the Tx ring. It locks the
290 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
291 is not available it stops the transmit queue by calling netif_stop_queue.
293 The interrupt handler has exclusive control over the Rx ring and records stats
294 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
295 empty by incrementing the dirty_tx mark. If at least half of the entries in
296 the Rx ring are available the transmit queue is woken up if it was stopped.
302 Preliminary VT86C100A manual from http://www.via.com.tw/
303 http://www.scyld.com/expert/100mbps.html
304 http://www.scyld.com/expert/NWay.html
305 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
306 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
311 The VT86C100A manual is not reliable information.
312 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
313 in significant performance degradation for bounce buffer copies on transmit
314 and unaligned IP headers on receive.
315 The chip does not pad to minimum transmit length.
/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   second only the 1234 card.
*/

/* Chip revision codes, compared against PCI_REVISION_ID in the probe.
   NOTE(review): the enclosing enum's opening line and several revision
   constants (e.g. VTunknown0, VT6102, VT6105, VT6105_B0, referenced in
   rhine_init_one) appear to have been lost in extraction. */
	VT8231	= 0x50,	/* Integrated MAC */
	VT8233	= 0x60,	/* Integrated MAC */
	VT8235	= 0x74,	/* Integrated MAC */
	VT8237	= 0x78,	/* Integrated MAC */
	VT6105M	= 0x90,	/* Management adapter */

/* Per-revision capability/quirk flags, stored in rp->quirks.
   NOTE(review): the enclosing enum's opening line appears to have been
   lost in extraction. */
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */

/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
359 static struct pci_device_id rhine_pci_tbl[] =
361 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
362 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
363 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
364 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
365 { } /* terminate list */
367 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers.
   Restored 'ChipCmd1' (referenced by rhine_chip_reset(), init_registers(),
   rhine_check_media()) and the closing '};', both lost in extraction. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
/* Bits in ConfigD (Tx backoff algorithm selection).
   Restored the enum framing lost in extraction. */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};
394 /* Registers we check that mmio and reg are the same. */
395 static const int mmio_verify_registers[] = {
396 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
/* Bits in the interrupt status/mask registers.
   Restored 'IntrPCIErr' (used in init_registers()'s IntrEnable mask),
   'IntrRxWakeUp' and the closing '};', all lost in extraction. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};
/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
/* NOTE(review): the WOL bit constants themselves (used by the switch in
   rhine_power_init()) appear to have been lost in extraction. */

/* The Rx and Tx buffer descriptors. */
/* NOTE(review): the rx_desc/tx_desc struct framing (struct lines, status,
   addr and next_desc fields, closing braces) appears to have been lost in
   extraction; only the two desc_length members survive below. */
	u32 desc_length; /* Chain flag, Buffer/frame length */
	u32 desc_length; /* Chain flag, Tx Config, Frame length */

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC 0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
/* NOTE(review): closing '};' appears to have been lost in extraction. */

/* Bits in *_desc.*_status */
enum desc_status_bits {
/* NOTE(review): enum body (e.g. DescOwn, referenced in alloc_rbufs())
   appears to have been lost in extraction. */

/* Bits in ChipCmd. */
/* NOTE(review): the enclosing enum's opening line appears to have been
   lost in extraction. ChipCmd holds the Cmd* bits; ChipCmd1 the Cmd1*. */
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
/* Per-device driver state, placed in netdev_priv(). */
struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;	/* DMA-coherent; see alloc_ring() */
	struct tx_desc *tx_ring;	/* follows rx_ring in the same area */
	dma_addr_t rx_ring_dma;		/* bus address of rx_ring */
	dma_addr_t tx_ring_dma;		/* bus address of tx_ring */

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];	/* per-entry slice of tx_bufs */
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;

	struct net_device_stats stats;

	/* Frequently used values: keep some adjacent for cache effect. */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */

	u8 tx_thresh, rx_thresh;	/* FIFO thresholds; see init_registers() */

	struct mii_if_info mii_if;
	struct work_struct tx_timeout_task;
	struct work_struct check_media_task;
	/* NOTE(review): fields referenced elsewhere in this file (rp->base,
	   rp->pioaddr, rp->quirks, rp->lock) and the closing '};' appear to
	   have been lost in extraction. */
/* Forward declarations for the net_device methods and internal helpers. */
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static void rhine_tx_timeout_task(struct net_device *dev);
static void rhine_check_media_task(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static void rhine_rx(struct net_device *dev);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
/*
 * Busy-wait (up to 1024 polls) for 'condition' to become true; when
 * debugging and more than half the budget was used, report the cost.
 * Restored the 'int i = 1024;' initialization and '} while (0)' tail,
 * which were lost in extraction.
 */
#define RHINE_WAIT_FOR(condition) do {					\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024 - i, __func__, __LINE__);		\
} while (0)
526 static inline u32 get_intr_status(struct net_device *dev)
528 struct rhine_private *rp = netdev_priv(dev);
529 void __iomem *ioaddr = rp->base;
532 intr_status = ioread16(ioaddr + IntrStatus);
533 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
534 if (rp->quirks & rqStatusWBRace)
535 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		/* NOTE(review): the declarations of 'wolstat'/'reason', the
		   function's opening brace and the 'switch (wolstat)' framing
		   (with its WOL* case labels) around the strings below appear
		   to have been lost in extraction. */
			reason = "Magic packet";
			reason = "Link went up";
			reason = "Link went down";
			reason = "Unicast packet";
			reason = "Multicast/broadcast packet";
		/* Tell the user which power event woke the machine. */
		printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
599 static void rhine_chip_reset(struct net_device *dev)
601 struct rhine_private *rp = netdev_priv(dev);
602 void __iomem *ioaddr = rp->base;
604 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
607 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
608 printk(KERN_INFO "%s: Reset not complete yet. "
609 "Trying harder.\n", DRV_NAME);
612 if (rp->quirks & rqForceReset)
613 iowrite8(0x40, ioaddr + MiscCmd);
615 /* Reset can take somewhat longer (rare) */
616 RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
620 printk(KERN_INFO "%s: Reset %s.\n", dev->name,
621 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
622 "failed" : "succeeded");
626 static void enable_mmio(long pioaddr, u32 quirks)
629 if (quirks & rqRhineI) {
630 /* More recent docs say that this bit is reserved ... */
631 n = inb(pioaddr + ConfigA) | 0x20;
632 outb(n, pioaddr + ConfigA);
634 n = inb(pioaddr + ConfigD) | 0x80;
635 outb(n, pioaddr + ConfigD);
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Writing 0x20 starts the autoload; poll until the chip clears it. */
	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	/* NOTE(review): an '#ifdef USE_MMIO'/'#endif' pair presumably framed
	   this call — confirm against kernel history; it appears to have
	   been lost in extraction, as were the function braces. */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: run the interrupt handler with the device IRQ
 * masked so it cannot re-enter.
 * Restored the function braces and the '#endif', which were lost in
 * extraction.
 */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif
676 static void rhine_hw_init(struct net_device *dev, long pioaddr)
678 struct rhine_private *rp = netdev_priv(dev);
680 /* Reset the chip to erase previous misconfiguration. */
681 rhine_chip_reset(dev);
683 /* Rhine-I needs extra time to recuperate before EEPROM reload */
684 if (rp->quirks & rqRhineI)
687 /* Reload EEPROM controlled bytes cleared by soft reset */
688 rhine_reload_eeprom(pioaddr, dev);
/*
 * PCI probe: identify the Rhine revision, map its registers, read the MAC
 * address, wire up the net_device operations and register the interface.
 * NOTE(review): many framing lines (locals such as 'quirks', 'pci_rev',
 * 'io_size', 'bar', 'name', 'phy_id', 'memaddr', 'mii_cmd', most braces,
 * gotos and the error-path labels/returns) appear to have been lost in
 * extraction; the surviving code lines below are kept verbatim.
 */
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
	struct net_device *dev;
	struct rhine_private *rp;
	void __iomem *ioaddr;

/* when built into the kernel, we only print version if device is found */
	static int printed_version;
	if (!printed_version++)

	/* Chip revision selects the reported name, quirks and PHY id. */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
	if (pci_rev < VTunknown0) {
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
		if (pci_rev >= VT6105_B0)
			quirks |= rq6patterns;
		if (pci_rev < VT6105M)
			name = "Rhine III (Management Adapter)";

	rc = pci_enable_device(pdev);

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "

	/* Both BARs must be large enough for the register window. */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
		printk(KERN_ERR "alloc_etherdev failed\n");
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->pioaddr = pioaddr;

	rc = pci_request_regions(pdev, DRV_NAME);
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;

	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	/* The station address is readable at StationAddr after HW init. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		printk(KERN_ERR "Invalid MAC address\n");

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	INIT_WORK(&rp->tx_timeout_task,
		  (void (*)(void *))rhine_tx_timeout_task, dev);

	INIT_WORK(&rp->check_media_task,
		  (void (*)(void *))rhine_check_media_task, dev);

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* Un-isolate the PHY and report its status. */
	int mii_status = mdio_read(dev, phy_id, 1);
	mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
	mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
	if (mii_status != 0xffff && mii_status != 0x0000) {
		rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
		printk(KERN_INFO "%s: MII PHY found at address "
		       "%d, status 0x%4.4x advertising %4.4x "
		       "Link %4.4x.\n", dev->name, phy_id,
		       mii_status, rp->mii_if.advertising,
		       mdio_read(dev, phy_id, 5));

		/* set IFF_RUNNING */
		if (mii_status & BMSR_LSTATUS)
			netif_carrier_on(dev);
		/* NOTE(review): an 'else' line preceding the call below
		   appears to have been lost in extraction. */
			netif_carrier_off(dev);

	rp->mii_if.phy_id = phy_id;

	/* NOTE(review): error-path labels appear lost; these are the
	   unwind actions for the mapping/region acquisitions above. */
	pci_iounmap(pdev, ioaddr);
	pci_release_regions(pdev);
/*
 * Allocate one DMA-coherent area holding both descriptor rings and, for
 * Rhine-I (rqRhineI), a second area used as Tx bounce buffers.
 * NOTE(review): the locals ('ring', 'ring_dma'), the '&ring_dma'/'
 * &rp->tx_bufs_dma' trailing arguments, return statements and braces
 * appear to have been lost in extraction.
 */
static int alloc_ring(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
		printk(KERN_ERR "Could not allocate DMA memory.\n");
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
		if (rp->tx_bufs == NULL) {
			/* Roll back the ring allocation on failure. */
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),

	/* The Tx ring follows the Rx ring inside the single allocation. */
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
/*
 * Release the DMA-coherent descriptor area and the Rhine-I Tx bounce
 * buffers allocated by alloc_ring().
 * NOTE(review): the function braces and (presumably) a guard around the
 * tx_bufs free appear to have been lost in extraction.
 */
static void free_ring(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);

	pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
			    rp->tx_bufs, rp->tx_bufs_dma);
/*
 * Size the Rx buffers from the MTU, link the Rx descriptors into a ring,
 * then populate each entry with a freshly allocated, DMA-mapped skbuff
 * owned by the chip (DescOwn).
 * NOTE(review): locals ('i', 'next'), braces and the allocation-failure
 * 'break' appear to have been lost in extraction.
 */
static void alloc_rbufs(struct net_device *dev)
	struct rhine_private *rp = netdev_priv(dev);

	rp->dirty_rx = rp->cur_rx = 0;

	/* PKT_BUF_SZ (1536) covers a standard 1500-byte MTU plus slack. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		/* Hand the descriptor to the hardware. */
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/*
 * Unmap and free every Rx skbuff and poison the ring entries so stale
 * descriptors cannot be reused.
 * NOTE(review): function braces and the loop counter declaration appear
 * to have been lost in extraction.
 */
static void free_rbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		rp->rx_skbuff[i] = NULL;
/*
 * Initialize the Tx descriptor ring as a circular list; on Rhine-I attach
 * a slice of the bounce-buffer area to each entry.
 * NOTE(review): function braces and the 'i'/'next' declarations appear to
 * have been lost in extraction.
 */
static void alloc_tbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	/* Wrap the final descriptor back to the head of the ring. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
/*
 * Reset every Tx ring entry and release any queued skbuffs along with
 * their DMA mappings.
 * NOTE(review): function braces and the loop counter declaration appear
 * to have been lost in extraction.
 */
static void free_tbufs(struct net_device* dev)
	struct rhine_private *rp = netdev_priv(dev);

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			/* A zero dma handle means no mapping of its own —
			   presumably a bounce-buffered Tx packet. */
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
			dev_kfree_skb(rp->tx_skbuff[i]);
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
/*
 * Let the MII library (re)check the link, then mirror the negotiated
 * duplex into the MAC's Cmd1FDuplex bit.
 * NOTE(review): function braces, the 'ioaddr + ChipCmd1);' continuation
 * lines of the two iowrite8 calls, the 'else' between them and a debug
 * guard around the printk appear to have been lost in extraction.
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,

	printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
	       rp->mii_if.force_media, netif_carrier_ok(dev));
/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);

	/* NOTE(review): function braces and (presumably) a debug guard
	   around the printk appear to have been lost in extraction. */
	printk(KERN_INFO "%s: force_media %d, carrier %d\n",
	       mii->dev->name, mii->force_media,
	       netif_carrier_ok(mii->dev));
/*
 * Workqueue callback (scheduled via rp->check_media_task): re-run the
 * media check outside interrupt context.
 * Restored the function braces, which were lost in extraction.
 */
static void rhine_check_media_task(struct net_device *dev)
{
	rhine_check_media(dev, 0);
}
/*
 * Program the station address, FIFO thresholds, descriptor ring base
 * pointers and Rx mode, unmask interrupts and start the Tx/Rx engines.
 * NOTE(review): function braces, the 'i' declaration and the
 * 'ioaddr + ChipCmd);' continuation of the final iowrite16 appear to
 * have been lost in extraction.
 */
static void init_registers(struct net_device *dev)
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	/* Start both engines; Cmd1NoTxPoll lands in ChipCmd1 via the
	   16-bit write. */
	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	rhine_check_media(dev, 1);
1149 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1150 static void rhine_enable_linkmon(void __iomem *ioaddr)
/* Stop any MII command in progress, point the poll at BMSR, and
 * trigger a one-shot read (0x40... here 0x80 = poll trigger). */
1152 iowrite8(0, ioaddr + MIICmd);
1153 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1154 iowrite8(0x80, ioaddr + MIICmd);
/* Busy-wait until the chip signals the poll has latched (bit 0x20). */
1156 RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
/* Bit 0x40 in MIIRegAddr enables continuous auto-polling of BMSR. */
1158 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1161 /* Disable MII link status auto-polling (required for MDIO access) */
1162 static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1164 iowrite8(0, ioaddr + MIICmd);
/* Rhine-I needs an extra dance to stop the poller cleanly. */
1166 if (quirks & rqRhineI) {
1167 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1169 /* Do not call from ISR! */
1172 /* 0x80 must be set immediately before turning it off */
1173 iowrite8(0x80, ioaddr + MIICmd);
/* Wait for the one-shot poll to latch before clearing the trigger. */
1175 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1177 /* Heh. Now clear 0x80 again. */
1178 iowrite8(0, ioaddr + MIICmd);
/* Wait until the MII interface reports idle (bit 0x80 set). */
1181 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1184 /* Read and write over the MII Management Data I/O (MDIO) interface. */
/*
 * mdio_read - read PHY register @regnum of PHY @phy_id.
 * Link auto-polling must be suspended for the duration of the access
 * and is re-enabled before returning. Busy-waits; do not call from ISR.
 */
1186 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1188 struct rhine_private *rp = netdev_priv(dev);
1189 void __iomem *ioaddr = rp->base;
1192 rhine_disable_linkmon(ioaddr, rp->quirks);
1194 /* rhine_disable_linkmon already cleared MIICmd */
1195 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1196 iowrite8(regnum, ioaddr + MIIRegAddr);
1197 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
/* Wait for the chip to clear the read-trigger bit, then fetch data. */
1198 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1199 result = ioread16(ioaddr + MIIData);
1201 rhine_enable_linkmon(ioaddr);
/*
 * mdio_write - write @value to PHY register @regnum of PHY @phy_id.
 * Mirrors mdio_read(): link auto-polling is suspended across the access.
 * Busy-waits; do not call from ISR.
 */
1205 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1207 struct rhine_private *rp = netdev_priv(dev);
1208 void __iomem *ioaddr = rp->base;
1210 rhine_disable_linkmon(ioaddr, rp->quirks);
1212 /* rhine_disable_linkmon already cleared MIICmd */
1213 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1214 iowrite8(regnum, ioaddr + MIIRegAddr);
1215 iowrite16(value, ioaddr + MIIData);
1216 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
/* Wait for the chip to clear the write-trigger bit. */
1217 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1219 rhine_enable_linkmon(ioaddr);
/*
 * rhine_open - net_device open() method. Grabs the (shared) IRQ,
 * allocates the descriptor rings, resets and reprograms the chip, then
 * starts the transmit queue. Returns 0 or a negative errno.
 */
1222 static int rhine_open(struct net_device *dev)
1224 struct rhine_private *rp = netdev_priv(dev);
1225 void __iomem *ioaddr = rp->base;
1228 rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
/* NOTE(review): the error check on rc (elided lines 1229-1232) is not
 * visible here -- confirmed only by the matching free_irq below. */
1234 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1235 dev->name, rp->pdev->irq);
1237 rc = alloc_ring(dev);
/* Ring allocation failed: release the IRQ taken above. */
1239 free_irq(rp->pdev->irq, dev);
1244 rhine_chip_reset(dev);
1245 init_registers(dev);
1247 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1248 "MII status: %4.4x.\n",
1249 dev->name, ioread16(ioaddr + ChipCmd),
1250 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1252 netif_start_queue(dev);
/*
 * rhine_tx_timeout - net_device tx_timeout() method. Called in softirq
 * context; only schedules rhine_tx_timeout_task() so the heavy reset
 * work runs in process context.
 */
1257 static void rhine_tx_timeout(struct net_device *dev)
1259 struct rhine_private *rp = netdev_priv(dev);
1262 * Move bulk of work outside of interrupt context
1264 schedule_work(&rp->tx_timeout_task);
/*
 * rhine_tx_timeout_task - process-context half of the TX-timeout path.
 * Disables the IRQ and takes rp->lock to quiesce the chip, clears the
 * descriptor rings (elided lines 1283-1286), then fully resets and
 * reprograms the hardware before waking the queue.
 */
1267 static void rhine_tx_timeout_task(struct net_device *dev)
1269 struct rhine_private *rp = netdev_priv(dev);
1270 void __iomem *ioaddr = rp->base;
1272 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1273 "%4.4x, resetting...\n",
1274 dev->name, ioread16(ioaddr + IntrStatus),
1275 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1277 /* protect against concurrent rx interrupts */
1278 disable_irq(rp->pdev->irq);
1280 spin_lock(&rp->lock);
1282 /* clear all descriptors */
1288 /* Reinitialize the hardware. */
1289 rhine_chip_reset(dev);
1290 init_registers(dev);
1292 spin_unlock(&rp->lock);
1293 enable_irq(rp->pdev->irq);
/* Restart the watchdog clock, account the error, let traffic flow. */
1295 dev->trans_start = jiffies;
1296 rp->stats.tx_errors++;
1297 netif_wake_queue(dev);
/*
 * rhine_start_tx - net_device hard_start_xmit() method.
 * Queues one skb on the TX ring. On Rhine-I, unaligned/fragmented/
 * checksum-offload skbs must be copied into a driver bounce buffer
 * because the chip cannot DMA them directly; otherwise the skb data is
 * mapped for streaming DMA. Ownership of the descriptor is handed to
 * the chip last, under rp->lock, then TX demand is kicked.
 */
1300 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1302 struct rhine_private *rp = netdev_priv(dev);
1303 void __iomem *ioaddr = rp->base;
1306 /* Caution: the write order is important here, set the field
1307 with the "ownership" bits last. */
1309 /* Calculate the next Tx descriptor entry. */
1310 entry = rp->cur_tx % TX_RING_SIZE;
/* Pad short frames to the minimum Ethernet length.
 * NOTE(review): the NULL check on skb_padto's return (elided lines
 * 1314-1317) is not visible here -- confirm it exists. */
1312 if (skb->len < ETH_ZLEN) {
1313 skb = skb_padto(skb, ETH_ZLEN);
1318 rp->tx_skbuff[entry] = skb;
1320 if ((rp->quirks & rqRhineI) &&
1321 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
1322 /* Must use alignment buffer. */
1323 if (skb->len > PKT_BUF_SZ) {
1324 /* packet too long, drop it */
1326 rp->tx_skbuff[entry] = NULL;
1327 rp->stats.tx_dropped++;
1331 /* Padding is not copied and so must be redone. */
1332 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1333 if (skb->len < ETH_ZLEN)
1334 memset(rp->tx_buf[entry] + skb->len, 0,
1335 ETH_ZLEN - skb->len);
/* tx_skbuff_dma == 0 marks "bounce buffer used, nothing to unmap". */
1336 rp->tx_skbuff_dma[entry] = 0;
1337 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1338 (rp->tx_buf[entry] -
/* Normal path: map the skb data for device-bound DMA. */
1341 rp->tx_skbuff_dma[entry] =
1342 pci_map_single(rp->pdev, skb->data, skb->len,
1344 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1347 rp->tx_ring[entry].desc_length =
1348 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
/* lock eth irq */
1351 spin_lock_irq(&rp->lock);
/* Hand the descriptor to the chip -- must be the last field written. */
1353 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1358 /* Non-x86 Todo: explicitly flush cache lines here. */
1360 /* Wake the potentially-idle transmit channel */
1361 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
/* Throttle: stop the queue when the ring is full. */
1365 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1366 netif_stop_queue(dev);
1368 dev->trans_start = jiffies;
1370 spin_unlock_irq(&rp->lock);
1373 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1374 dev->name, rp->cur_tx-1, entry);
1379 /* The interrupt handler does all of the Rx thread work and cleans up
1380 after the Tx thread. */
/*
 * Loops while the chip reports pending events, acknowledging each batch
 * first, then dispatching to rhine_rx()/rhine_tx()/rhine_error().
 * Bounded by max_interrupt_work to avoid starving the system.
 */
1381 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1383 struct net_device *dev = dev_instance;
1384 struct rhine_private *rp = netdev_priv(dev);
1385 void __iomem *ioaddr = rp->base;
1387 int boguscnt = max_interrupt_work;
1390 while ((intr_status = get_intr_status(dev))) {
1393 /* Acknowledge all of the current interrupt sources ASAP. */
1394 if (intr_status & IntrTxDescRace)
1395 iowrite8(0x08, ioaddr + IntrStatus2);
1396 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1400 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1401 dev->name, intr_status);
/* Any RX-related event: service the receive ring. */
1403 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1404 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1407 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1408 if (intr_status & IntrTxErrSummary) {
1409 /* Avoid scavenging before Tx engine turned off */
1410 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1412 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1413 printk(KERN_WARNING "%s: "
1414 "rhine_interrupt() Tx engine"
1415 "still on.\n", dev->name);
1420 /* Abnormal error summary/uncommon events handlers. */
1421 if (intr_status & (IntrPCIErr | IntrLinkChange |
1422 IntrStatsMax | IntrTxError | IntrTxAborted |
1423 IntrTxUnderrun | IntrTxDescRace))
1424 rhine_error(dev, intr_status);
/* Safety valve: bail out if the chip keeps raising events. */
1426 if (--boguscnt < 0) {
1427 printk(KERN_WARNING "%s: Too much work at interrupt, "
1429 dev->name, intr_status);
1435 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1436 dev->name, ioread16(ioaddr + IntrStatus));
1437 return IRQ_RETVAL(handled);
1440 /* This routine is logically part of the interrupt handler, but isolated
/*
 * rhine_tx - scavenge completed TX descriptors: account stats/errors,
 * unmap and free the transmitted skbs, and wake the queue when room
 * frees up. Runs under rp->lock in interrupt context.
 */
1442 static void rhine_tx(struct net_device *dev)
1444 struct rhine_private *rp = netdev_priv(dev);
1445 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1447 spin_lock(&rp->lock);
1449 /* find and cleanup dirty tx descriptors */
1450 while (rp->dirty_tx != rp->cur_tx) {
1451 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1453 printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
/* Descriptor still owned by the chip: nothing more to reap. */
1455 if (txstatus & DescOwn)
/* Bit 15 set = transmit error; decode the individual causes. */
1457 if (txstatus & 0x8000) {
1459 printk(KERN_DEBUG "%s: Transmit error, "
1460 "Tx status %8.8x.\n",
1461 dev->name, txstatus);
1462 rp->stats.tx_errors++;
1463 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1464 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1465 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1466 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1467 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1468 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1469 rp->stats.tx_fifo_errors++;
/* Underflow: give the descriptor back and retry this frame. */
1470 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1471 break; /* Keep the skb - we try again */
1473 /* Transmitter restarted in 'abnormal' handler. */
/* Rhine-I keeps the collision count in bits 3..6, later chips in 0..3. */
1475 if (rp->quirks & rqRhineI)
1476 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1478 rp->stats.collisions += txstatus & 0x0F;
1480 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1481 (txstatus >> 3) & 0xF,
1483 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1484 rp->stats.tx_packets++;
1486 /* Free the original skb. */
/* A zero dma handle means the bounce buffer was used; nothing to unmap. */
1487 if (rp->tx_skbuff_dma[entry]) {
1488 pci_unmap_single(rp->pdev,
1489 rp->tx_skbuff_dma[entry],
1490 rp->tx_skbuff[entry]->len,
1493 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1494 rp->tx_skbuff[entry] = NULL;
1495 entry = (++rp->dirty_tx) % TX_RING_SIZE;
/* Re-enable the queue once enough ring slots are free again. */
1497 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1498 netif_wake_queue(dev);
1500 spin_unlock(&rp->lock);
1503 /* This routine is logically part of the interrupt handler, but isolated
1504 for clarity and better register allocation. */
/*
 * rhine_rx - drain completed RX descriptors. Good frames shorter than
 * rx_copybreak are copied into a freshly allocated small skb (keeping
 * the ring buffer in place); larger frames hand the ring skb straight
 * up the stack and the slot is refilled afterwards.
 */
1505 static void rhine_rx(struct net_device *dev)
1507 struct rhine_private *rp = netdev_priv(dev);
1508 int entry = rp->cur_rx % RX_RING_SIZE;
/* Never process more slots than are currently outstanding. */
1509 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1512 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1514 le32_to_cpu(rp->rx_head_desc->rx_status));
1517 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1518 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1519 struct rx_desc *desc = rp->rx_head_desc;
1520 u32 desc_status = le32_to_cpu(desc->rx_status);
/* Frame length lives in the upper 16 bits of the status word. */
1521 int data_size = desc_status >> 16;
1524 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
/* Anything other than a whole, error-free packet is accounted and dropped. */
1528 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1529 if ((desc_status & RxWholePkt) != RxWholePkt) {
1530 printk(KERN_WARNING "%s: Oversized Ethernet "
1531 "frame spanned multiple buffers, entry "
1532 "%#x length %d status %8.8x!\n",
1533 dev->name, entry, data_size,
1535 printk(KERN_WARNING "%s: Oversized Ethernet "
1536 "frame %p vs %p.\n", dev->name,
1537 rp->rx_head_desc, &rp->rx_ring[entry]);
1538 rp->stats.rx_length_errors++;
1539 } else if (desc_status & RxErr) {
1540 /* There was a error. */
1542 printk(KERN_DEBUG "rhine_rx() Rx "
1543 "error was %8.8x.\n",
1545 rp->stats.rx_errors++;
1546 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1547 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1548 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1549 if (desc_status & 0x0002) {
1550 /* this can also be updated outside the interrupt handler */
1551 spin_lock(&rp->lock);
1552 rp->stats.rx_crc_errors++;
1553 spin_unlock(&rp->lock);
/* Good frame from here on. */
1557 struct sk_buff *skb;
1558 /* Length should omit the CRC */
1559 int pkt_len = data_size - 4;
1561 /* Check if the packet is long enough to accept without
1562 copying to a minimally-sized skbuff. */
1563 if (pkt_len < rx_copybreak &&
1564 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1566 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync the DMA buffer for CPU access, copy, then give it back
 * to the device -- the ring skb stays mapped and in place. */
1567 pci_dma_sync_single_for_cpu(rp->pdev,
1568 rp->rx_skbuff_dma[entry],
1570 PCI_DMA_FROMDEVICE);
1572 eth_copy_and_sum(skb,
1573 rp->rx_skbuff[entry]->data,
1575 skb_put(skb, pkt_len);
1576 pci_dma_sync_single_for_device(rp->pdev,
1577 rp->rx_skbuff_dma[entry],
1579 PCI_DMA_FROMDEVICE);
/* Big frame: detach the ring skb and pass it up directly. */
1581 skb = rp->rx_skbuff[entry];
1583 printk(KERN_ERR "%s: Inconsistent Rx "
1584 "descriptor chain.\n",
1588 rp->rx_skbuff[entry] = NULL;
1589 skb_put(skb, pkt_len);
1590 pci_unmap_single(rp->pdev,
1591 rp->rx_skbuff_dma[entry],
1593 PCI_DMA_FROMDEVICE);
1595 skb->protocol = eth_type_trans(skb, dev);
1597 dev->last_rx = jiffies;
1598 rp->stats.rx_bytes += pkt_len;
1599 rp->stats.rx_packets++;
1601 entry = (++rp->cur_rx) % RX_RING_SIZE;
1602 rp->rx_head_desc = &rp->rx_ring[entry];
1605 /* Refill the Rx ring buffers. */
1606 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1607 struct sk_buff *skb;
1608 entry = rp->dirty_rx % RX_RING_SIZE;
1609 if (rp->rx_skbuff[entry] == NULL) {
1610 skb = dev_alloc_skb(rp->rx_buf_sz);
1611 rp->rx_skbuff[entry] = skb;
1613 break; /* Better luck next round. */
1614 skb->dev = dev; /* Mark as being used by this device. */
1615 rp->rx_skbuff_dma[entry] =
1616 pci_map_single(rp->pdev, skb->data,
1618 PCI_DMA_FROMDEVICE);
1619 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
/* Give the refilled slot back to the chip. */
1621 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
/*
1626 * Clears the "tally counters" for CRC errors and missed frames(?).
1627 * It has been reported that some chips need a write of 0 to clear
1628 * these, for others the counters are set to 1 when written to and
1629 * instead cleared when read. So we clear them both ways ...
 */
1631 static inline void clear_tally_counters(void __iomem *ioaddr)
1633 iowrite32(0, ioaddr + RxMissed)
1634 ioread16(ioaddr + RxCRCErrs);
1635 ioread16(ioaddr + RxMissed);
/*
 * rhine_restart_tx - restart the TX engine after an abnormal stop.
 * Repoints the chip at the first un-reaped descriptor and re-enables
 * TX, but only if no new TX error summary is pending (in which case
 * the ISR will come back here soon anyway).
 */
1638 static void rhine_restart_tx(struct net_device *dev) {
1639 struct rhine_private *rp = netdev_priv(dev);
1640 void __iomem *ioaddr = rp->base;
1641 int entry = rp->dirty_tx % TX_RING_SIZE;
1645 * If new errors occured, we need to sort them out before doing Tx.
1646 * In that case the ISR will be back here RSN anyway.
1648 intr_status = get_intr_status(dev);
1650 if ((intr_status & IntrTxErrSummary) == 0) {
1652 /* We know better than the chip where it should continue. */
1653 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1654 ioaddr + TxRingPtr);
/* Turn the TX engine back on and kick a demand transmit. */
1656 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1658 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1663 /* This should never happen */
1665 printk(KERN_WARNING "%s: rhine_restart_tx() "
1666 "Another error occured %8.8x.\n",
1667 dev->name, intr_status);
/*
 * rhine_error - handle the uncommon/abnormal interrupt causes: link
 * change (deferred to a work item), statistics overflow, TX aborts,
 * underruns and descriptor races. Bumps the TX FIFO threshold on
 * underrun/unspecified TX errors and restarts the TX engine as needed.
 * Runs under rp->lock in interrupt context.
 */
1672 static void rhine_error(struct net_device *dev, int intr_status)
1674 struct rhine_private *rp = netdev_priv(dev);
1675 void __iomem *ioaddr = rp->base;
1677 spin_lock(&rp->lock);
/* Media checks sleep (MDIO busy-waits), so punt to process context. */
1679 if (intr_status & IntrLinkChange)
1680 schedule_work(&rp->check_media_task);
1681 if (intr_status & IntrStatsMax) {
1682 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1683 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1684 clear_tally_counters(ioaddr);
1686 if (intr_status & IntrTxAborted) {
1688 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1689 dev->name, intr_status);
1691 if (intr_status & IntrTxUnderrun) {
/* Raise TX FIFO threshold by 0x20 up to the 0xE0 ceiling. */
1692 if (rp->tx_thresh < 0xE0)
1693 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1695 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1696 "threshold now %2.2x.\n",
1697 dev->name, rp->tx_thresh);
1699 if (intr_status & IntrTxDescRace) {
1701 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
/* TX error with no specific cause bit: also bump the threshold. */
1704 if ((intr_status & IntrTxError) &&
1705 (intr_status & (IntrTxAborted |
1706 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1707 if (rp->tx_thresh < 0xE0) {
1708 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1711 printk(KERN_INFO "%s: Unspecified error. Tx "
1712 "threshold now %2.2x.\n",
1713 dev->name, rp->tx_thresh);
1715 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1717 rhine_restart_tx(dev);
/* Anything not decoded above gets logged loudly. */
1719 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1720 IntrTxError | IntrTxAborted | IntrNormalSummary |
1723 printk(KERN_ERR "%s: Something Wicked happened! "
1724 "%8.8x.\n", dev->name, intr_status);
1727 spin_unlock(&rp->lock);
/*
 * rhine_get_stats - net_device get_stats() method. Folds the chip's
 * hardware tally counters into the software stats under the lock
 * (irqsave: may be called from any context) and returns &rp->stats.
 */
1730 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1732 struct rhine_private *rp = netdev_priv(dev);
1733 void __iomem *ioaddr = rp->base;
1734 unsigned long flags;
1736 spin_lock_irqsave(&rp->lock, flags);
1737 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1738 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1739 clear_tally_counters(ioaddr);
1740 spin_unlock_irqrestore(&rp->lock, flags);
/*
 * rhine_set_rx_mode - net_device set_multicast_list() method.
 * Programs the multicast hash filter / promiscuous bits: promiscuous
 * and "too many / all-multi" modes open the filter completely; the
 * normal path builds a 64-bit CRC hash from the multicast list.
 * NOTE(review): rx_mode is set on elided lines (1755, 1764, 1777) in
 * each branch before the final RxConfig write -- confirm.
 */
1745 static void rhine_set_rx_mode(struct net_device *dev)
1747 struct rhine_private *rp = netdev_priv(dev);
1748 void __iomem *ioaddr = rp->base;
1749 u32 mc_filter[2]; /* Multicast hash filter */
1750 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1752 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1753 /* Unconditionally log net taps. */
1754 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1757 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1758 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1759 } else if ((dev->mc_count > multicast_filter_limit)
1760 || (dev->flags & IFF_ALLMULTI)) {
1761 /* Too many to match, or accept all multicasts. */
1762 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1763 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1766 struct dev_mc_list *mclist;
1768 memset(mc_filter, 0, sizeof(mc_filter));
/* Hash each address with the Ethernet CRC; top 6 bits pick the filter bit. */
1769 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1770 i++, mclist = mclist->next) {
1771 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1773 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1775 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1776 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
/* Combine the receive threshold with the chosen mode bits. */
1779 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
/* ethtool get_drvinfo: report driver name, version and PCI bus location. */
1782 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1784 struct rhine_private *rp = netdev_priv(dev);
1786 strcpy(info->driver, DRV_NAME);
1787 strcpy(info->version, DRV_VERSION);
1788 strcpy(info->bus_info, pci_name(rp->pdev));
/* ethtool get_settings: delegate to the MII library under the lock. */
1791 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1793 struct rhine_private *rp = netdev_priv(dev);
1796 spin_lock_irq(&rp->lock);
1797 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1798 spin_unlock_irq(&rp->lock);
/* ethtool set_settings: apply via the MII library, then refresh carrier
 * state since force_media may have changed. */
1803 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1805 struct rhine_private *rp = netdev_priv(dev);
1808 spin_lock_irq(&rp->lock);
1809 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1810 spin_unlock_irq(&rp->lock);
1811 rhine_set_carrier(&rp->mii_if);
/* ethtool nway_reset: restart PHY autonegotiation via the MII library. */
1816 static int netdev_nway_reset(struct net_device *dev)
1818 struct rhine_private *rp = netdev_priv(dev);
1820 return mii_nway_restart(&rp->mii_if);
/* ethtool get_link: query PHY link status via the MII library. */
1823 static u32 netdev_get_link(struct net_device *dev)
1825 struct rhine_private *rp = netdev_priv(dev);
1827 return mii_link_ok(&rp->mii_if);
/* ethtool msglevel accessors; bodies (elided lines) presumably map to the
 * module-level 'debug' variable -- confirm against the full file. */
1830 static u32 netdev_get_msglevel(struct net_device *dev)
1835 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/* ethtool get_wol: report supported and currently enabled Wake-on-LAN
 * options; a no-op on chips without the WOL quirk. */
1840 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1842 struct rhine_private *rp = netdev_priv(dev);
1844 if (!(rp->quirks & rqWOL))
1847 spin_lock_irq(&rp->lock);
1848 wol->supported = WAKE_PHY | WAKE_MAGIC |
1849 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1850 wol->wolopts = rp->wolopts;
1851 spin_unlock_irq(&rp->lock);
/* ethtool set_wol: validate the requested options against what the chip
 * supports and store them; they take effect in rhine_shutdown(). */
1854 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1856 struct rhine_private *rp = netdev_priv(dev);
1857 u32 support = WAKE_PHY | WAKE_MAGIC |
1858 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1860 if (!(rp->quirks & rqWOL))
/* Reject any option we do not support. */
1863 if (wol->wolopts & ~support)
1866 spin_lock_irq(&rp->lock);
1867 rp->wolopts = wol->wolopts;
1868 spin_unlock_irq(&rp->lock);
/* ethtool operations table wired into the net_device at probe time. */
1873 static struct ethtool_ops netdev_ethtool_ops = {
1874 .get_drvinfo = netdev_get_drvinfo,
1875 .get_settings = netdev_get_settings,
1876 .set_settings = netdev_set_settings,
1877 .nway_reset = netdev_nway_reset,
1878 .get_link = netdev_get_link,
1879 .get_msglevel = netdev_get_msglevel,
1880 .set_msglevel = netdev_set_msglevel,
1881 .get_wol = rhine_get_wol,
1882 .set_wol = rhine_set_wol,
1883 .get_sg = ethtool_op_get_sg,
1884 .get_tx_csum = ethtool_op_get_tx_csum,
1885 .get_perm_addr = ethtool_op_get_perm_addr,
/*
 * netdev_ioctl - net_device do_ioctl() method: forward MII ioctls to the
 * generic handler, then refresh carrier state in case force_media changed.
 * Only valid while the interface is up.
 */
1888 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1890 struct rhine_private *rp = netdev_priv(dev);
1893 if (!netif_running(dev))
1896 spin_lock_irq(&rp->lock);
1897 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1898 spin_unlock_irq(&rp->lock);
1899 rhine_set_carrier(&rp->mii_if);
/*
 * rhine_close - net_device stop() method. Quiesces the chip (loopback,
 * interrupts masked, DMA engines stopped), releases the IRQ, flushes
 * deferred work, and (on elided lines) frees the descriptor rings.
 */
1904 static int rhine_close(struct net_device *dev)
1906 struct rhine_private *rp = netdev_priv(dev);
1907 void __iomem *ioaddr = rp->base;
1909 spin_lock_irq(&rp->lock);
1911 netif_stop_queue(dev);
1914 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1915 "status was %4.4x.\n",
1916 dev->name, ioread16(ioaddr + ChipCmd));
1918 /* Switch to loopback mode to avoid hardware races. */
1919 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1921 /* Disable interrupts by clearing the interrupt mask. */
1922 iowrite16(0x0000, ioaddr + IntrEnable);
1924 /* Stop the chip's Tx and Rx processes. */
1925 iowrite16(CmdStop, ioaddr + ChipCmd);
1927 spin_unlock_irq(&rp->lock);
1929 free_irq(rp->pdev->irq, dev);
/* Wait out any pending check_media / tx_timeout work items. */
1931 flush_scheduled_work();
/*
 * rhine_remove_one - PCI remove() callback: unregister the netdev, unmap
 * the register window, release PCI resources and clear drvdata.
 */
1941 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1943 struct net_device *dev = pci_get_drvdata(pdev);
1944 struct rhine_private *rp = netdev_priv(dev);
1946 unregister_netdev(dev);
1948 pci_iounmap(pdev, rp->base);
1949 pci_release_regions(pdev);
1952 pci_disable_device(pdev);
1953 pci_set_drvdata(pdev, NULL);
/*
 * rhine_shutdown - PCI shutdown() callback, also used on suspend.
 * Arms the configured Wake-on-LAN triggers and puts the chip into
 * power state D3. Nothing to do on chips without the WOL quirk.
 */
1956 static void rhine_shutdown (struct pci_dev *pdev)
1958 struct net_device *dev = pci_get_drvdata(pdev);
1959 struct rhine_private *rp = netdev_priv(dev);
1960 void __iomem *ioaddr = rp->base;
1962 if (!(rp->quirks & rqWOL))
1963 return; /* Nothing to do for non-WOL adapters */
1965 rhine_power_init(dev);
1967 /* Make sure we use pattern 0, 1 and not 4, 5 */
1968 if (rp->quirks & rq6patterns)
1969 iowrite8(0x04, ioaddr + 0xA7);
1971 if (rp->wolopts & WAKE_MAGIC) {
1972 iowrite8(WOLmagic, ioaddr + WOLcrSet);
1974 * Turn EEPROM-controlled wake-up back on -- some hardware may
1975 * not cooperate otherwise.
1977 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1980 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1981 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1983 if (rp->wolopts & WAKE_PHY)
1984 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1986 if (rp->wolopts & WAKE_UCAST)
1987 iowrite8(WOLucast, ioaddr + WOLcrSet);
/* NOTE(review): legacy WOL is likely conditional on an elided test
 * (line 1989) -- confirm before relying on it unconditionally. */
1990 /* Enable legacy WOL (for old motherboards) */
1991 iowrite8(0x01, ioaddr + PwcfgSet);
1992 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1995 /* Hit power state D3 (sleep) */
1996 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1998 /* TODO: Check use of pci_enable_wake() */
/*
 * rhine_suspend - PCI suspend() callback (CONFIG_PM). Detaches the
 * netdev, saves PCI config space, arms WOL/D3 via rhine_shutdown()
 * under the lock, and releases the IRQ. No-op if the interface is down.
 */
2003 static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
2005 struct net_device *dev = pci_get_drvdata(pdev);
2006 struct rhine_private *rp = netdev_priv(dev);
2007 unsigned long flags;
2009 if (!netif_running(dev))
2012 netif_device_detach(dev);
2013 pci_save_state(pdev);
2015 spin_lock_irqsave(&rp->lock, flags);
2016 rhine_shutdown(pdev);
2017 spin_unlock_irqrestore(&rp->lock, flags);
2019 free_irq(dev->irq, dev);
/*
 * rhine_resume - PCI resume() callback (CONFIG_PM). Re-requests the IRQ,
 * returns the chip to D0, restores PCI config space, re-enables MMIO,
 * re-runs power init and (on elided lines) rebuilds the rings before
 * reprogramming registers and reattaching the device.
 */
2023 static int rhine_resume(struct pci_dev *pdev)
2025 struct net_device *dev = pci_get_drvdata(pdev);
2026 struct rhine_private *rp = netdev_priv(dev);
2027 unsigned long flags;
2030 if (!netif_running(dev))
/* Failure here is only logged; the device would come up without an IRQ. */
2033 if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
2034 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
2036 ret = pci_set_power_state(pdev, PCI_D0);
2038 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
2039 dev->name, ret ? "failed" : "succeeded", ret);
2041 pci_restore_state(pdev);
2043 spin_lock_irqsave(&rp->lock, flags);
2045 enable_mmio(rp->pioaddr, rp->quirks);
2047 rhine_power_init(dev);
2052 init_registers(dev);
2053 spin_unlock_irqrestore(&rp->lock, flags);
2055 netif_device_attach(dev);
/* PCI driver descriptor; suspend/resume are only present under CONFIG_PM. */
2061 static struct pci_driver rhine_driver = {
2063 .id_table = rhine_pci_tbl,
2064 .probe = rhine_init_one,
2065 .remove = __devexit_p(rhine_remove_one),
2067 .suspend = rhine_suspend,
2068 .resume = rhine_resume,
2069 #endif /* CONFIG_PM */
2070 .shutdown = rhine_shutdown,
/* Module init: register the PCI driver with the core. */
2074 static int __init rhine_init(void)
2076 /* when a module, this is printed whether or not devices are found in probe */
2080 return pci_module_init(&rhine_driver);
/* Module exit: unregister the PCI driver (detaches all bound devices). */
2084 static void __exit rhine_cleanup(void)
2086 pci_unregister_driver(&rhine_driver);
2090 module_init(rhine_init);
2091 module_exit(rhine_cleanup);