/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/skbuff.h>

#include <linux/tcp.h>
#include <net/checksum.h>

#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"
/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN 2
/* Must be a power of two */
#define RX_RING_SIZE 1024
#define TX_RING_SIZE 4096

#define DEFAULT_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)
#define TX_DESC(tx, num)        ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(tx, num)   ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(rx, num)        ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(rx, num)   ((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(rx, num)        ((rx)->buffers[(num) & (RX_RING_SIZE-1)])

#define RING_USED(ring)         (((ring)->next_to_fill - (ring)->next_to_clean) \
                                 & ((ring)->size - 1))
#define RING_AVAIL(ring)        ((ring->size) - RING_USED(ring))

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
        return 1;
#else
        return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}

static void write_iob_reg(unsigned int reg, unsigned int val)
{
        pasemi_write_iob_reg(reg, val);
}

static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
{
        return pasemi_read_mac_reg(mac->dma_if, reg);
}

static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
                          unsigned int val)
{
        pasemi_write_mac_reg(mac->dma_if, reg, val);
}

static unsigned int read_dma_reg(unsigned int reg)
{
        return pasemi_read_dma_reg(reg);
}

static void write_dma_reg(unsigned int reg, unsigned int val)
{
        pasemi_write_dma_reg(reg, val);
}

static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)
{
        return mac->rx;
}

static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)
{
        return mac->tx;
}

static inline void prefetch_skb(const struct sk_buff *skb)
static int mac_to_intf(struct pasemi_mac *mac)
{
        struct pci_dev *pdev = mac->pdev;
        int nintf, off, i, j;
        int devfn = pdev->devfn;

        tmp = read_dma_reg(PAS_DMA_CAP_IFI);
        nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
        off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

        /* IOFF contains the offset to the registers containing the
         * DMA interface-to-MAC-pci-id mappings, and NIN contains number
         * of total interfaces. Each register contains 4 devfns.
         * Just do a linear search until we find the devfn of the MAC
         * we're trying to look up.
         */
        for (i = 0; i < (nintf+3)/4; i++) {
                tmp = read_dma_reg(off+4*i);
                for (j = 0; j < 4; j++) {
                        if (((tmp >> (8*j)) & 0xff) == devfn)
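/* Fetch the MAC address for this interface from the device tree: prefer
 * the local-mac-address byte array, falling back to the legacy string
 * formatted mac-address property used by some old firmware versions.
 */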
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
        struct pci_dev *pdev = mac->pdev;
        struct device_node *dn = pci_device_to_OF_node(pdev);

                        "No device node for mac, not configuring\n");

        maddr = of_get_property(dn, "local-mac-address", &len);

        if (maddr && len == 6) {
                memcpy(mac->mac_addr, maddr, 6);

        /* Some old versions of firmware mistakenly use mac-address
         * (and as a string) instead of a byte array in local-mac-address.
         */
        maddr = of_get_property(dn, "mac-address", NULL);

                        "no mac address in device tree, not configuring\n");

        if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
                   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {

                        "can't parse mac address, not configuring\n");

        memcpy(mac->mac_addr, addr, 6);
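/* Unmap the head and all fragments of a transmitted skb and free it.
 * Returns the number of descriptor slots the packet occupied so the
 * caller can advance past them.
 */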
static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
                                   struct sk_buff *skb,
                                   const dma_addr_t *dmas)
{
        int nfrags = skb_shinfo(skb)->nr_frags;
        struct pci_dev *pdev = mac->dma_pdev;

        pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);

        for (f = 0; f < nfrags; f++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);

        dev_kfree_skb_irq(skb);

        /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
         * rounded up to a multiple of 2.
         */
        return (nfrags + 3) & ~1;
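/* Allocate and initialize the RX DMA channel for this interface:
 * descriptor ring, buffer ring and software ring_info, plus the
 * RXCHAN/RXINT base and config registers.
 */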
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
{
        struct pasemi_mac_rxring *ring;
        struct pasemi_mac *mac = netdev_priv(dev);

        ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
                                     offsetof(struct pasemi_mac_rxring, chan));

                dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");

        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = RX_RING_SIZE;
        ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  RX_RING_SIZE, GFP_KERNEL);

        if (!ring->ring_info)

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))

        ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
                                           RX_RING_SIZE * sizeof(u64),
                                           &ring->buf_dma, GFP_KERNEL);

        memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

        write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
                      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

        write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
                      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
                      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

        if (translation_enabled())
                cfg |= PAS_DMA_RXCHAN_CFG_CTR;

        write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

        write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
                      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

        write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
                      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
                      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
              PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
              PAS_DMA_RXINT_CFG_HEN;

        if (translation_enabled())
                cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

        write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;

        kfree(ring->ring_info);

        pasemi_dma_free_chan(&ring->chan);
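/* Allocate and initialize a TX DMA channel: descriptor ring and software
 * ring_info, plus the TXCHAN base and config registers.
 */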
static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);

        struct pasemi_mac_txring *ring;

        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
                                     offsetof(struct pasemi_mac_txring, chan));

                dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");

        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = TX_RING_SIZE;
        ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  TX_RING_SIZE, GFP_KERNEL);
        if (!ring->ring_info)

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))

        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
        val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

        cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
              PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
              PAS_DMA_TXCHAN_CFG_UP |
              PAS_DMA_TXCHAN_CFG_WT(2);

        if (translation_enabled())
                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;

        kfree(ring->ring_info);

        pasemi_dma_free_chan(&ring->chan);
static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
{
        struct pasemi_mac_txring *txring = tx_ring(mac);

        struct pasemi_mac_buffer *info;
        dma_addr_t dmas[MAX_SKB_FRAGS+1];

        start = txring->next_to_clean;
        limit = txring->next_to_fill;

        /* Compensate for when fill has wrapped and clean has not */
        if (start > limit)
                limit += TX_RING_SIZE;

        for (i = start; i < limit; i += freed) {
                info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
                if (info->dma && info->skb) {
                        for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
                                dmas[j] = txring->ring_info[(i+1+j) &
                                          (TX_RING_SIZE-1)].dma;
                        freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);

        kfree(txring->ring_info);
        pasemi_dma_free_chan(&txring->chan);
static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
        struct pasemi_mac_rxring *rx = rx_ring(mac);

        struct pasemi_mac_buffer *info;

        for (i = 0; i < RX_RING_SIZE; i++) {
                info = &RX_DESC_INFO(rx, i);
                if (info->skb && info->dma) {
                        pci_unmap_single(mac->dma_pdev,
                        dev_kfree_skb_any(info->skb);

        for (i = 0; i < RX_RING_SIZE; i++)

        dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
                          rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

        kfree(rx_ring(mac)->ring_info);
        pasemi_dma_free_chan(&rx_ring(mac)->chan);
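/* Refill up to 'limit' RX buffer slots with freshly allocated, DMA-mapped
 * skbs and tell the hardware how many were added.
 */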
static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
                                         const int limit)
{
        const struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_rxring *rx = rx_ring(mac);

        fill = rx_ring(mac)->next_to_fill;
        for (count = 0; count < limit; count++) {
                struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
                u64 *buff = &RX_BUFF(rx, fill);

                /* skb might still be in there for recycle on short receives */

                skb = dev_alloc_skb(BUF_SIZE);
                skb_reserve(skb, LOCAL_SKB_ALIGN);

                dma = pci_map_single(mac->dma_pdev, skb->data,
                                     BUF_SIZE - LOCAL_SKB_ALIGN,
                                     PCI_DMA_FROMDEVICE);

                if (unlikely(dma_mapping_error(dma))) {
                        dev_kfree_skb_irq(info->skb);

                *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);

        write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

        rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
                                     (RX_RING_SIZE - 1);
static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
{
        unsigned int reg, pcnt;
        /* Re-enable packet count interrupts: finally
         * ack the packet count interrupt we got in rx_intr.
         */

        pcnt = *rx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);
static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
{
        unsigned int reg, pcnt;

        /* Re-enable packet count interrupts */
        pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
                                       const u64 macrx)
{
        unsigned int rcmdsta, ccmdsta;
        struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

        if (!netif_msg_rx_err(mac))
                return;

        rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
        ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
               macrx, *chan->status);

        printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
               rcmdsta, ccmdsta);
static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
                                       const u64 mactx)
{
        struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

        if (!netif_msg_tx_err(mac))
                return;

        cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\
                "tx status 0x%016lx\n", mactx, *chan->status);

        printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
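/* Receive processing: walk the RX descriptor ring, hand completed packets
 * to the stack, recycle buffers on CRC errors, and replenish the buffer
 * ring with what was consumed.
 */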
static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
                               const int limit)
{
        const struct pasemi_dmachan *chan = &rx->chan;
        struct pasemi_mac *mac = rx->mac;
        struct pci_dev *pdev = mac->dma_pdev;

        int count, buf_index, tot_bytes, packets;
        struct pasemi_mac_buffer *info;

        spin_lock(&rx->lock);

        n = rx->next_to_clean;

        prefetch(&RX_DESC(rx, n));

        for (count = 0; count < limit; count++) {
                macrx = RX_DESC(rx, n);
                prefetch(&RX_DESC(rx, n+4));

                if ((macrx & XCT_MACRX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_rx_error(mac, macrx);

                if (!(macrx & XCT_MACRX_O))

                BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

                eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
                        XCT_RXRES_8B_EVAL_S;

                dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
                info = &RX_DESC_INFO(rx, buf_index);

                len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

                pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN,
                                 PCI_DMA_FROMDEVICE);

                if (macrx & XCT_MACRX_CRC) {
                        /* CRC error flagged */
                        mac->netdev->stats.rx_errors++;
                        mac->netdev->stats.rx_crc_errors++;
                        /* No need to free skb, it'll be reused */

                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(mac->netdev,
                                                   len + LOCAL_SKB_ALIGN);

                                skb_reserve(new_skb, LOCAL_SKB_ALIGN);
                                memcpy(new_skb->data, skb->data, len);
                                /* save the skb in buffer_info as good */

                        /* else just continue with the old one */

                if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
                                    XCT_MACRX_CSUM_S;
                } else
                        skb->ip_summed = CHECKSUM_NONE;

                /* Don't include CRC */

                skb->protocol = eth_type_trans(skb, mac->netdev);
                netif_receive_skb(skb);

                RX_DESC(rx, n+1) = 0;

                /* Need to zero it out since hardware doesn't, since the
                 * replenish loop uses it to tell when it's done.
                 */
                RX_BUFF(rx, buf_index) = 0;

        if (n > RX_RING_SIZE) {
                /* Errata 5971 workaround: L2 target of headers */
                write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
                n &= (RX_RING_SIZE-1);

        rx_ring(mac)->next_to_clean = n;

        /* Increase is in number of 16-byte entries, and since each descriptor
         * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
         * count*2.
         */
        write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

        pasemi_mac_replenish_rx_ring(mac->netdev, count);

        mac->netdev->stats.rx_bytes += tot_bytes;
        mac->netdev->stats.rx_packets += packets;

        spin_unlock(&rx_ring(mac)->lock);
/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
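/* Reclaim completed TX descriptors in batches of TX_CLEAN_BATCHSIZE,
 * unmapping and freeing the associated skbs outside the ring lock.
 */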
static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
        struct pasemi_dmachan *chan = &txring->chan;
        struct pasemi_mac *mac = txring->mac;

        unsigned int start, descr_count, buf_count, batch_limit;
        unsigned int ring_limit;
        unsigned int total_count;

        struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
        dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];

        batch_limit = TX_CLEAN_BATCHSIZE;

        spin_lock_irqsave(&txring->lock, flags);

        start = txring->next_to_clean;
        ring_limit = txring->next_to_fill;

        /* Compensate for when fill has wrapped but clean has not */
        if (start > ring_limit)
                ring_limit += TX_RING_SIZE;

             descr_count < batch_limit && i < ring_limit;
                u64 mactx = TX_DESC(txring, i);

                if ((mactx & XCT_MACTX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_tx_error(mac, mactx);

                if (unlikely(mactx & XCT_MACTX_O))
                        /* Not yet transmitted */

                skb = TX_DESC_INFO(txring, i+1).skb;
                skbs[descr_count] = skb;

                buf_count = 2 + skb_shinfo(skb)->nr_frags;
                for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++)
                        dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

                TX_DESC(txring, i) = 0;
                TX_DESC(txring, i+1) = 0;

                /* Since we always fill with an even number of entries, make
                 * sure we skip any unused one at the end as well.
                 */

        txring->next_to_clean = i & (TX_RING_SIZE-1);

        spin_unlock_irqrestore(&txring->lock, flags);
        netif_wake_queue(mac->netdev);

        for (i = 0; i < descr_count; i++)
                pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);

        total_count += descr_count;

        /* If the batch was full, try to clean more */
        if (descr_count == batch_limit)
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
        const struct pasemi_mac_rxring *rxring = data;
        struct pasemi_mac *mac = rxring->mac;
        struct net_device *dev = mac->netdev;
        const struct pasemi_dmachan *chan = &rxring->chan;

        if (!(*chan->status & PAS_STATUS_CAUSE_M))
                return IRQ_NONE;

        /* Don't reset packet count so it won't fire again but clear
         * all others.
         */

        if (*chan->status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
        if (*chan->status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
        if (*chan->status & PAS_STATUS_TIMER)
                reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

        netif_rx_schedule(dev, &mac->napi);

        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

        return IRQ_HANDLED;
#define TX_CLEAN_INTERVAL HZ

static void pasemi_mac_tx_timer(unsigned long data)
{
        struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
        struct pasemi_mac *mac = txring->mac;

        pasemi_mac_clean_tx(txring);

        mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

        pasemi_mac_restart_tx_intr(mac);
static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
        struct pasemi_mac_txring *txring = data;
        const struct pasemi_dmachan *chan = &txring->chan;
        struct pasemi_mac *mac = txring->mac;

        if (!(*chan->status & PAS_STATUS_CAUSE_M))
                return IRQ_NONE;

        if (*chan->status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
        if (*chan->status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

        mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

        netif_rx_schedule(mac->netdev, &mac->napi);

        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
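/* phylib link-change callback: mirror the PHY's link, speed and duplex
 * state into the MAC's PCFG register and log transitions.
 */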
static void pasemi_adjust_link(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);

        unsigned int new_flags;

        if (!mac->phydev->link) {
                /* If no link, MAC speed settings don't matter. Just report
                 * link down and return.
                 */
                if (mac->link && netif_msg_link(mac))
                        printk(KERN_INFO "%s: Link is down.\n", dev->name);

                netif_carrier_off(dev);

                netif_carrier_on(dev);

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
        new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
                              PAS_MAC_CFG_PCFG_TSR_M);

        if (!mac->phydev->duplex)
                new_flags |= PAS_MAC_CFG_PCFG_HD;

        switch (mac->phydev->speed) {
        case 1000:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
                             PAS_MAC_CFG_PCFG_TSR_1G;
                break;
        case 100:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
                             PAS_MAC_CFG_PCFG_TSR_100M;
                break;
        case 10:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
                             PAS_MAC_CFG_PCFG_TSR_10M;
                break;
        default:
                printk("Unsupported speed %d\n", mac->phydev->speed);
        }

        /* Print on link or speed/duplex change */
        msg = mac->link != mac->phydev->link || flags != new_flags;

        mac->duplex = mac->phydev->duplex;
        mac->speed = mac->phydev->speed;
        mac->link = mac->phydev->link;

        if (new_flags != flags)
                write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

        if (msg && netif_msg_link(mac))
                printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
                       dev->name, mac->speed, mac->duplex ? "full" : "half");
static int pasemi_mac_phy_init(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        struct device_node *dn, *phy_dn;
        struct phy_device *phydev;

        const unsigned int *prop;

        dn = pci_device_to_OF_node(mac->pdev);
        ph = of_get_property(dn, "phy-handle", NULL);

        phy_dn = of_find_node_by_phandle(*ph);

        prop = of_get_property(phy_dn, "reg", NULL);
        ret = of_address_to_resource(phy_dn->parent, 0, &r);

        snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

        phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
                return PTR_ERR(phydev);
        }

        mac->phydev = phydev;
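/* net_device open: bring up the DMA engine sections, allocate and program
 * the RX/TX channels, enable the MAC, attach the PHY and request the
 * per-channel interrupts.
 */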
static int pasemi_mac_open(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);

        /* enable rx section */
        write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

        /* enable tx section */
        write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

        flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
                PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
                PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

        write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

        /* 0xffffff is max value, about 16ms */
        write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
                      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));

        ret = pasemi_mac_setup_rx_resources(dev);
        if (ret)
                goto out_rx_resources;

        mac->tx = pasemi_mac_setup_tx_resources(dev);

        write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
                      PAS_IOB_DMA_RXCH_CFG_CNTTH(0));

        write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
                      PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

        write_mac_reg(mac, PAS_MAC_IPC_CHNL,
                      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
                      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                      PAS_DMA_RXINT_RCMDSTA_EN |
                      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
                      PAS_DMA_RXINT_RCMDSTA_BP |
                      PAS_DMA_RXINT_RCMDSTA_OO |
                      PAS_DMA_RXINT_RCMDSTA_BT);

        /* enable rx channel */
        pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
                                                   PAS_DMA_RXCHAN_CCMDSTA_OD |
                                                   PAS_DMA_RXCHAN_CCMDSTA_FD |
                                                   PAS_DMA_RXCHAN_CCMDSTA_DT);

        /* enable tx channel */
        pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
                                                   PAS_DMA_TXCHAN_TCMDSTA_DB |
                                                   PAS_DMA_TXCHAN_TCMDSTA_DE |
                                                   PAS_DMA_TXCHAN_TCMDSTA_DA);

        pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

        write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),

        /* Clear out any residual packet count state from firmware */
        pasemi_mac_restart_rx_intr(mac);
        pasemi_mac_restart_tx_intr(mac);

        flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
                PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

        if (mac->type == MAC_TYPE_GMAC)
                flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
        else
                flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

        /* Enable interface in MAC */
        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

        ret = pasemi_mac_phy_init(dev);
        /* Some configs don't have PHYs (XAUI etc), so don't complain about
         * failed init due to -ENODEV.
         */
        if (ret && ret != -ENODEV)
                dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);

        netif_start_queue(dev);
        napi_enable(&mac->napi);

        snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",

        ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
                          mac->tx_irq_name, mac->tx);
        if (ret) {
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        mac->tx->chan.irq, ret);

        snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",

        ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
                          mac->rx_irq_name, mac->rx);
        if (ret) {
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        mac->rx->chan.irq, ret);

        phy_start(mac->phydev);

        init_timer(&mac->tx->clean_timer);
        mac->tx->clean_timer.function = pasemi_mac_tx_timer;
        mac->tx->clean_timer.data = (unsigned long)mac->tx;
        mac->tx->clean_timer.expires = jiffies+HZ;
        add_timer(&mac->tx->clean_timer);

        free_irq(mac->tx->chan.irq, mac->tx);

        napi_disable(&mac->napi);
        netif_stop_queue(dev);

        pasemi_mac_free_tx_resources(mac);
        pasemi_mac_free_rx_resources(mac);
#define MAX_RETRIES 5000
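/* net_device stop: quiesce the PHY, drain and stop the TX/RX channels
 * (polling up to MAX_RETRIES for them to go inactive), then disable them
 * and free the rings and interrupts.
 */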
static int pasemi_mac_close(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);

        rxch = rx_ring(mac)->chan.chno;
        txch = tx_ring(mac)->chan.chno;

        phy_stop(mac->phydev);
        phy_disconnect(mac->phydev);

        del_timer_sync(&mac->tx->clean_timer);

        netif_stop_queue(dev);
        napi_disable(&mac->napi);

        sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
        if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
                   PAS_DMA_RXINT_RCMDSTA_OO |
                   PAS_DMA_RXINT_RCMDSTA_BT))
                printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

        sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
        if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
                   PAS_DMA_RXCHAN_CCMDSTA_OD |
                   PAS_DMA_RXCHAN_CCMDSTA_FD |
                   PAS_DMA_RXCHAN_CCMDSTA_DT))
                printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

        sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
        if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
                   PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
                printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

        /* Clean out any pending buffers */
        pasemi_mac_clean_tx(tx_ring(mac));
        pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

        /* Disable interface */
        write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
                      PAS_DMA_TXCHAN_TCMDSTA_ST);
        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                      PAS_DMA_RXINT_RCMDSTA_ST);
        write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
                      PAS_DMA_RXCHAN_CCMDSTA_ST);

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
                if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))

        if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
                if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))

        if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
                if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))

        if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

        /* Then, disable the channel. This must be done separately from
         * stopping, since you can't disable when active.
         */
        write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
        write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

        free_irq(mac->tx->chan.irq, mac->tx);
        free_irq(mac->rx->chan.irq, mac->rx);

        /* Free resources */
        pasemi_mac_free_rx_resources(mac);
        pasemi_mac_free_tx_resources(mac);
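/* hard_start_xmit: build checksum-offload flags, DMA-map the skb head and
 * fragments, and post one mactx descriptor plus buffer pointers to the
 * TX ring.
 */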
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_txring *txring;

        dma_addr_t map[MAX_SKB_FRAGS+1];
        unsigned int map_size[MAX_SKB_FRAGS+1];
        unsigned long flags;

        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const unsigned char *nh = skb_network_header(skb);

                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        dflags |= XCT_MACTX_CSUM_TCP;
                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
                        dflags |= XCT_MACTX_IPO(nh - skb->data);
                        break;
                case IPPROTO_UDP:
                        dflags |= XCT_MACTX_CSUM_UDP;
                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
                        dflags |= XCT_MACTX_IPO(nh - skb->data);
                        break;
                }
        }

        nfrags = skb_shinfo(skb)->nr_frags;

        map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                                PCI_DMA_TODEVICE);
        map_size[0] = skb_headlen(skb);
        if (dma_mapping_error(map[0]))
                goto out_err_nolock;

        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
                                        frag->page_offset, frag->size,
                                        PCI_DMA_TODEVICE);
                map_size[i+1] = frag->size;
                if (dma_mapping_error(map[i+1])) {
                        goto out_err_nolock;

        mactx = dflags | XCT_MACTX_LLEN(skb->len);

        txring = tx_ring(mac);

        spin_lock_irqsave(&txring->lock, flags);

        fill = txring->next_to_fill;

        /* Avoid stepping on the same cache line that the DMA controller
         * is currently about to send, so leave at least 8 words available.
         * Total free space needed is mactx + fragments + 8
         */
        if (RING_AVAIL(txring) < nfrags + 10) {
                /* no room -- stop the queue and wait for tx intr */
                netif_stop_queue(dev);

        TX_DESC(txring, fill) = mactx;

        TX_DESC_INFO(txring, fill).skb = skb;
        for (i = 0; i <= nfrags; i++) {
                TX_DESC(txring, fill+i) =
                        XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
                TX_DESC_INFO(txring, fill+i).dma = map[i];
        }

        /* We have to add an even number of 8-byte entries to the ring
         * even if the last one is unused. That means always an odd number
         * of pointers + one mactx descriptor.
         */

        txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        spin_unlock_irqrestore(&txring->lock, flags);

        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

        return NETDEV_TX_OK;

        spin_unlock_irqrestore(&txring->lock, flags);

        pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
                         PCI_DMA_TODEVICE);

        return NETDEV_TX_BUSY;
static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
        const struct pasemi_mac *mac = netdev_priv(dev);

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

        /* Set promiscuous */
        if (dev->flags & IFF_PROMISC)
                flags |= PAS_MAC_CFG_PCFG_PR;
        else
                flags &= ~PAS_MAC_CFG_PCFG_PR;

        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
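/* NAPI poll: clean the TX ring, process up to 'budget' RX packets, and
 * re-enable interrupts once the RX ring is drained.
 */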
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
        struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
        struct net_device *dev = mac->netdev;

        pasemi_mac_clean_tx(tx_ring(mac));
        pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
        if (pkts < budget) {
                /* all done, no more packets present */
                netif_rx_complete(dev, napi);

                pasemi_mac_restart_rx_intr(mac);
                pasemi_mac_restart_tx_intr(mac);
        }
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct pasemi_mac *mac;

        DECLARE_MAC_BUF(mac_buf);

        err = pci_enable_device(pdev);

        dev = alloc_etherdev(sizeof(struct pasemi_mac));
                       "pasemi_mac: Could not allocate ethernet device.\n");
                goto out_disable_device;

        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        mac = netdev_priv(dev);

        netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

        dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |

        mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
        if (!mac->dma_pdev) {
                dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");

        mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
        if (!mac->iob_pdev) {
                dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");

        /* get mac addr from device tree */
        if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {

        memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

        mac->dma_if = mac_to_intf(mac);
        if (mac->dma_if < 0) {
                dev_err(&mac->pdev->dev, "Can't map DMA interface\n");

        switch (pdev->device) {
                mac->type = MAC_TYPE_GMAC;
                mac->type = MAC_TYPE_XAUI;

        dev->open = pasemi_mac_open;
        dev->stop = pasemi_mac_close;
        dev->hard_start_xmit = pasemi_mac_start_tx;
        dev->set_multicast_list = pasemi_mac_set_rx_mode;

        mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        /* Enable most messages by default */
        mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;

        err = register_netdev(dev);

                dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",

        } else if (netif_msg_probe(mac))
                printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
                       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
                       mac->dma_if, print_mac(mac_buf, dev->dev_addr));

        pci_dev_put(mac->iob_pdev);

        pci_dev_put(mac->dma_pdev);

        pci_disable_device(pdev);
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct pasemi_mac *mac;

        mac = netdev_priv(netdev);

        unregister_netdev(netdev);

        pci_disable_device(pdev);
        pci_dev_put(mac->dma_pdev);
        pci_dev_put(mac->iob_pdev);

        pasemi_dma_free_chan(&mac->tx->chan);
        pasemi_dma_free_chan(&mac->rx->chan);

        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
static struct pci_device_id pasemi_mac_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
        { },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
        .name           = "pasemi_mac",
        .id_table       = pasemi_mac_pci_tbl,
        .probe          = pasemi_mac_probe,
        .remove         = __devexit_p(pasemi_mac_remove),
};
static void __exit pasemi_mac_cleanup_module(void)
{
        pci_unregister_driver(&pasemi_mac_driver);
}

int pasemi_mac_init_module(void)
{
        err = pasemi_dma_init();

        return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);