/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
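/* Worked example (a sketch, not from the original source): with
 * B44_TX_RING_SIZE = 512 and tx_pending = 511, tx_cons = 2 and
 * tx_prod = 5 give TX_BUFFS_AVAIL = 2 + 511 - 5 = 508 free
 * descriptors, and NEXT_TX(511) = 512 & 511 = 0, so the index
 * wraps through the power-of-two mask without a conditional.
 */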

#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3

static void b44_init_hw(struct b44 *, int);

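/* Alignment mask and sync length used when flushing individual DMA
 * descriptors; both are set once at module load time (in init code
 * outside this hunk).  The helpers below round a descriptor's byte
 * offset down with dma_desc_align_mask and sync only
 * dma_desc_sync_size bytes rather than the whole ring.
 */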
static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

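/* Poll a register until `bit` is set (clear == 0) or cleared
 * (clear == 1).  Each iteration delays 10us, so e.g. a timeout
 * argument of 100 bounds the wait at roughly 1ms.
 */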
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 type, base;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

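/* A CAM entry splits the 6-byte MAC across two registers: DATA_HI
 * carries bytes 0-1 plus the valid bit, DATA_LO carries bytes 2-5.
 * E.g. (an illustrative address, not from the source) 00:10:18:22:33:44
 * yields DATA_LO = 0x18223344 and DATA_HI = CAM_DATA_HI_VALID | 0x0010.
 */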
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

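/* PHY access goes through the EMAC's MDIO interface.  The word
 * written to B44_MDIO_DATA encodes a standard clause-22 MII
 * management frame: start bits, read/write opcode, PHY address,
 * register address, turnaround bits and, for writes, the 16-bit data.
 */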
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
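/* Resulting buffer layout (a sketch; assumes the usual rx_offset of
 * 30 bytes, which is not set in this hunk):
 *
 *   mapping                          mapping + rx_offset
 *      |<---- rx_header + padding ---->|<---- packet data ---->|
 *                                       ^ programmed into dp->addr
 */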
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
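        /* DMA_30BIT_MASK is 0x3fffffff, so the check below rejects any
         * mapping whose buffer would end above the first 1GB of address
         * space and retries with an skb from the GFP_DMA zone. */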
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        skb->dev = bp->dev;
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

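/* Old-style NAPI poll: *budget is the global packet quota for this
 * softirq run and netdev->quota the per-device share; both are
 * decremented by the work done.  Returning 0 means done (the device
 * is taken off the poll list), 1 means poll again.
 */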
static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                                 len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           PCI_DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           PCI_DMA_TODEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
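/* Note: when pci_alloc_consistent() cannot satisfy a ring allocation,
 * the code below falls back to kmalloc() plus a streaming
 * dma_map_single() mapping.  Such a ring is not cache-coherent, which
 * is why the B44_FLAG_*_RING_HACK paths elsewhere in the driver sync
 * descriptors by hand around every hardware access.
 */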
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
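/* reset_kind selects how much state to rebuild: B44_FULL_RESET also
 * resets and reconfigures the PHY, B44_FULL_RESET_SKIP_PHY leaves the
 * PHY alone, and B44_PARTIAL_RESET only re-enables the RX DMA engine
 * (per the partial-reset branch below; its caller is outside this
 * hunk, presumably the wake-on-LAN suspend path).
 */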
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);

}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

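/* A wake-on-LAN magic packet is six 0xff sync bytes followed by the
 * station MAC repeated 16 times.  This routine builds that pattern at
 * 'offset' and sets a bit in the mask for every significant byte, so
 * the same code serves the raw ethernet, IPv4/UDP and IPv6/UDP
 * encapsulations, which differ only in their header lengths.
 */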
1484 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1485 {
1486         int magicsync = 6;
1487         int k, j, len = offset;
1488         int ethaddr_bytes = ETH_ALEN;
1489
1490         memset(ppattern + offset, 0xff, magicsync);
1491         for (j = 0; j < magicsync; j++)
1492                 set_bit(len++, (unsigned long *) pmask);
1493
1494         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1495                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1496                         ethaddr_bytes = ETH_ALEN;
1497                 else
1498                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1499                 if (ethaddr_bytes <=0)
1500                         break;
1501                 for (k = 0; k< ethaddr_bytes; k++) {
1502                         ppattern[offset + magicsync +
1503                                 (j * ETH_ALEN) + k] = macaddr[k];
1504                         len++;
1505                         set_bit(len, (unsigned long *) pmask);
1506                 }
1507         }
1508         return len - 1;
1509 }
1510
1511 /* Setup magic packet patterns in the b44 WOL
1512  * pattern matching filter.
1513  */
1514 static void b44_setup_pseudo_magicp(struct b44 *bp)
1515 {
1516
1517         u32 val;
1518         int plen0, plen1, plen2;
1519         u8 *pwol_pattern;
1520         u8 pwol_mask[B44_PMASK_SIZE];
1521
1522         pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1523         if (!pwol_pattern) {
1524                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1525                 return;
1526         }
1527
1528         /* IPv4 magic packet pattern - pattern 0. */
1529         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1530         memset(pwol_mask, 0, B44_PMASK_SIZE);
1531         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1532                                   B44_ETHIPV4UDP_HLEN);
1533
1534         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1535         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1536
1537         /* Raw ethernet II magic packet pattern - pattern 1 */
1538         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1539         memset(pwol_mask, 0, B44_PMASK_SIZE);
1540         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1541                                   ETH_HLEN);
1542
1543         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1544                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1545         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1546                        B44_PMASK_BASE + B44_PMASK_SIZE);
1547
1548         /* IPv6 magic packet pattern - pattern 2 */
1549         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1550         memset(pwol_mask, 0, B44_PMASK_SIZE);
1551         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1552                                   B44_ETHIPV6UDP_HLEN);
1553
1554         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1555                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1556         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1557                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1558
1559         kfree(pwol_pattern);
1560
1561         /* set these patterns' lengths: one less than each real length */
1562         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1563         bw32(bp, B44_WKUP_LEN, val);
1564
1565         /* enable wakeup pattern matching */
1566         val = br32(bp, B44_DEVCTRL);
1567         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1568
1569 }
1570
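/* Program the chip for wake-on-LAN before the system sleeps.  B0 and
 * later cores have native magic packet matching (DEVCTRL_MPM), so only
 * the station address needs to be loaded; older cores approximate it
 * with the pattern filter set up above.  Finally the power-management
 * enable bits are set in SBTMSLOW and in the PCI PMCSR (PME enable) so
 * the sleeping core can raise a wake event.
 */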
1571 static void b44_setup_wol(struct b44 *bp)
1572 {
1573         u32 val;
1574         u16 pmval;
1575
1576         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1577
1578         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1579
1580                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1581
1582                 val = bp->dev->dev_addr[2] << 24 |
1583                         bp->dev->dev_addr[3] << 16 |
1584                         bp->dev->dev_addr[4] << 8 |
1585                         bp->dev->dev_addr[5];
1586                 bw32(bp, B44_ADDR_LO, val);
1587
1588                 val = bp->dev->dev_addr[0] << 8 |
1589                         bp->dev->dev_addr[1];
1590                 bw32(bp, B44_ADDR_HI, val);
1591
1592                 val = br32(bp, B44_DEVCTRL);
1593                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1594
1595         } else {
1596                 b44_setup_pseudo_magicp(bp);
1597         }
1598
1599         val = br32(bp, B44_SBTMSLOW);
1600         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1601
1602         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1603         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1604
1605 }
1606
1607 static int b44_close(struct net_device *dev)
1608 {
1609         struct b44 *bp = netdev_priv(dev);
1610
1611         netif_stop_queue(dev);
1612
1613         netif_poll_disable(dev);
1614
1615         del_timer_sync(&bp->timer);
1616
1617         spin_lock_irq(&bp->lock);
1618
1619 #if 0
1620         b44_dump_state(bp);
1621 #endif
1622         b44_halt(bp);
1623         b44_free_rings(bp);
1624         netif_carrier_off(dev);
1625
1626         spin_unlock_irq(&bp->lock);
1627
1628         free_irq(dev->irq, dev);
1629
1630         netif_poll_enable(dev);
1631
1632         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1633                 b44_init_hw(bp, B44_PARTIAL_RESET);
1634                 b44_setup_wol(bp);
1635         }
1636
1637         b44_free_consistent(bp);
1638
1639         return 0;
1640 }
1641
1642 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1643 {
1644         struct b44 *bp = netdev_priv(dev);
1645         struct net_device_stats *nstat = &bp->stats;
1646         struct b44_hw_stats *hwstat = &bp->hw_stats;
1647
1648         /* Convert HW stats into netdevice stats. */
1649         nstat->rx_packets = hwstat->rx_pkts;
1650         nstat->tx_packets = hwstat->tx_pkts;
1651         nstat->rx_bytes   = hwstat->rx_octets;
1652         nstat->tx_bytes   = hwstat->tx_octets;
1653         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1654                              hwstat->tx_oversize_pkts +
1655                              hwstat->tx_underruns +
1656                              hwstat->tx_excessive_cols +
1657                              hwstat->tx_late_cols);
1658         nstat->multicast  = hwstat->rx_multicast_pkts;
1659         nstat->collisions = hwstat->tx_total_cols;
1660
1661         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1662                                    hwstat->rx_undersize);
1663         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1664         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1665         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1666         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1667                                    hwstat->rx_oversize_pkts +
1668                                    hwstat->rx_missed_pkts +
1669                                    hwstat->rx_crc_align_errs +
1670                                    hwstat->rx_undersize +
1671                                    hwstat->rx_crc_errs +
1672                                    hwstat->rx_align_errs +
1673                                    hwstat->rx_symbol_errs);
1674
1675         nstat->tx_aborted_errors = hwstat->tx_underruns;
1676 #if 0
1677         /* Carrier lost counter seems to be broken for some devices */
1678         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1679 #endif
1680
1681         return nstat;
1682 }
1683
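/* Load up to B44_MCAST_TABLE_SIZE multicast addresses into the CAM,
 * starting at entry 1 (entry 0 holds the unicast station address).
 * Returns the index of the first unused CAM entry.
 */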
1684 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1685 {
1686         struct dev_mc_list *mclist;
1687         int i, num_ents;
1688
1689         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1690         mclist = dev->mc_list;
1691         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1692                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1693         }
1694         return i + 1;
1695 }
1696
1697 static void __b44_set_rx_mode(struct net_device *dev)
1698 {
1699         struct b44 *bp = netdev_priv(dev);
1700         u32 val;
1701
1702         val = br32(bp, B44_RXCONFIG);
1703         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1704         if (dev->flags & IFF_PROMISC) {
1705                 val |= RXCONFIG_PROMISC;
1706                 bw32(bp, B44_RXCONFIG, val);
1707         } else {
1708                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1709                 int i = 1;
1710
1711                 __b44_set_mac_addr(bp);
1712
1713                 if ((dev->flags & IFF_ALLMULTI) ||
1714                     (dev->mc_count > B44_MCAST_TABLE_SIZE))
1715                         val |= RXCONFIG_ALLMULTI;
1716                 else
1717                         i = __b44_load_mcast(bp, dev);
1718
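                /* Zero any CAM entries left over from a previous,
                 * larger multicast list.
                 */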
1719                 for (; i < 64; i++)
1720                         __b44_cam_write(bp, zero, i);
1721
1722                 bw32(bp, B44_RXCONFIG, val);
1723                 val = br32(bp, B44_CAM_CTRL);
1724                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1725         }
1726 }
1727
1728 static void b44_set_rx_mode(struct net_device *dev)
1729 {
1730         struct b44 *bp = netdev_priv(dev);
1731
1732         spin_lock_irq(&bp->lock);
1733         __b44_set_rx_mode(dev);
1734         spin_unlock_irq(&bp->lock);
1735 }
1736
1737 static u32 b44_get_msglevel(struct net_device *dev)
1738 {
1739         struct b44 *bp = netdev_priv(dev);
1740         return bp->msg_enable;
1741 }
1742
1743 static void b44_set_msglevel(struct net_device *dev, u32 value)
1744 {
1745         struct b44 *bp = netdev_priv(dev);
1746         bp->msg_enable = value;
1747 }
1748
1749 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1750 {
1751         struct b44 *bp = netdev_priv(dev);
1752         struct pci_dev *pci_dev = bp->pdev;
1753
1754         strcpy (info->driver, DRV_MODULE_NAME);
1755         strcpy (info->version, DRV_MODULE_VERSION);
1756         strcpy (info->bus_info, pci_name(pci_dev));
1757 }
1758
1759 static int b44_nway_reset(struct net_device *dev)
1760 {
1761         struct b44 *bp = netdev_priv(dev);
1762         u32 bmcr;
1763         int r;
1764
1765         spin_lock_irq(&bp->lock);
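        /* BMCR is read twice and only the second value is used; the
         * first read presumably flushes a stale value.
         */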
1766         b44_readphy(bp, MII_BMCR, &bmcr);
1767         b44_readphy(bp, MII_BMCR, &bmcr);
1768         r = -EINVAL;
1769         if (bmcr & BMCR_ANENABLE) {
1770                 b44_writephy(bp, MII_BMCR,
1771                              bmcr | BMCR_ANRESTART);
1772                 r = 0;
1773         }
1774         spin_unlock_irq(&bp->lock);
1775
1776         return r;
1777 }
1778
1779 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1780 {
1781         struct b44 *bp = netdev_priv(dev);
1782
1783         cmd->supported = (SUPPORTED_Autoneg);
1784         cmd->supported |= (SUPPORTED_100baseT_Half |
1785                           SUPPORTED_100baseT_Full |
1786                           SUPPORTED_10baseT_Half |
1787                           SUPPORTED_10baseT_Full |
1788                           SUPPORTED_MII);
1789
1790         cmd->advertising = 0;
1791         if (bp->flags & B44_FLAG_ADV_10HALF)
1792                 cmd->advertising |= ADVERTISED_10baseT_Half;
1793         if (bp->flags & B44_FLAG_ADV_10FULL)
1794                 cmd->advertising |= ADVERTISED_10baseT_Full;
1795         if (bp->flags & B44_FLAG_ADV_100HALF)
1796                 cmd->advertising |= ADVERTISED_100baseT_Half;
1797         if (bp->flags & B44_FLAG_ADV_100FULL)
1798                 cmd->advertising |= ADVERTISED_100baseT_Full;
1799         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1800         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1801                 SPEED_100 : SPEED_10;
1802         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1803                 DUPLEX_FULL : DUPLEX_HALF;
1804         cmd->port = 0;
1805         cmd->phy_address = bp->phy_addr;
1806         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1807                 XCVR_INTERNAL : XCVR_EXTERNAL;
1808         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1809                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1810         if (cmd->autoneg == AUTONEG_ENABLE)
1811                 cmd->advertising |= ADVERTISED_Autoneg;
1812         if (!netif_running(dev)) {
1813                 cmd->speed = 0;
1814                 cmd->duplex = 0xff;
1815         }
1816         cmd->maxtxpkt = 0;
1817         cmd->maxrxpkt = 0;
1818         return 0;
1819 }
1820
1821 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1822 {
1823         struct b44 *bp = netdev_priv(dev);
1824
1825         /* We do not support gigabit. */
1826         if (cmd->autoneg == AUTONEG_ENABLE) {
1827                 if (cmd->advertising &
1828                     (ADVERTISED_1000baseT_Half |
1829                      ADVERTISED_1000baseT_Full))
1830                         return -EINVAL;
1831         } else if ((cmd->speed != SPEED_100 &&
1832                     cmd->speed != SPEED_10) ||
1833                    (cmd->duplex != DUPLEX_HALF &&
1834                     cmd->duplex != DUPLEX_FULL)) {
1835                 return -EINVAL;
1836         }
1837
1838         spin_lock_irq(&bp->lock);
1839
1840         if (cmd->autoneg == AUTONEG_ENABLE) {
1841                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1842                                B44_FLAG_100_BASE_T |
1843                                B44_FLAG_FULL_DUPLEX |
1844                                B44_FLAG_ADV_10HALF |
1845                                B44_FLAG_ADV_10FULL |
1846                                B44_FLAG_ADV_100HALF |
1847                                B44_FLAG_ADV_100FULL);
1848                 if (cmd->advertising == 0) {
1849                         bp->flags |= (B44_FLAG_ADV_10HALF |
1850                                       B44_FLAG_ADV_10FULL |
1851                                       B44_FLAG_ADV_100HALF |
1852                                       B44_FLAG_ADV_100FULL);
1853                 } else {
1854                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1855                                 bp->flags |= B44_FLAG_ADV_10HALF;
1856                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1857                                 bp->flags |= B44_FLAG_ADV_10FULL;
1858                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1859                                 bp->flags |= B44_FLAG_ADV_100HALF;
1860                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1861                                 bp->flags |= B44_FLAG_ADV_100FULL;
1862                 }
1863         } else {
1864                 bp->flags |= B44_FLAG_FORCE_LINK;
1865                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1866                 if (cmd->speed == SPEED_100)
1867                         bp->flags |= B44_FLAG_100_BASE_T;
1868                 if (cmd->duplex == DUPLEX_FULL)
1869                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1870         }
1871
1872         if (netif_running(dev))
1873                 b44_setup_phy(bp);
1874
1875         spin_unlock_irq(&bp->lock);
1876
1877         return 0;
1878 }
1879
1880 static void b44_get_ringparam(struct net_device *dev,
1881                               struct ethtool_ringparam *ering)
1882 {
1883         struct b44 *bp = netdev_priv(dev);
1884
1885         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1886         ering->rx_pending = bp->rx_pending;
1887
1888         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
         ering->tx_pending = bp->tx_pending;
1889 }
1890
1891 static int b44_set_ringparam(struct net_device *dev,
1892                              struct ethtool_ringparam *ering)
1893 {
1894         struct b44 *bp = netdev_priv(dev);
1895
1896         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1897             (ering->rx_mini_pending != 0) ||
1898             (ering->rx_jumbo_pending != 0) ||
1899             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1900                 return -EINVAL;
1901
1902         spin_lock_irq(&bp->lock);
1903
1904         bp->rx_pending = ering->rx_pending;
1905         bp->tx_pending = ering->tx_pending;
1906
1907         b44_halt(bp);
1908         b44_init_rings(bp);
1909         b44_init_hw(bp, B44_FULL_RESET);
1910         netif_wake_queue(bp->dev);
1911         spin_unlock_irq(&bp->lock);
1912
1913         b44_enable_ints(bp);
1914
1915         return 0;
1916 }
1917
1918 static void b44_get_pauseparam(struct net_device *dev,
1919                                 struct ethtool_pauseparam *epause)
1920 {
1921         struct b44 *bp = netdev_priv(dev);
1922
1923         epause->autoneg =
1924                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1925         epause->rx_pause =
1926                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1927         epause->tx_pause =
1928                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1929 }
1930
1931 static int b44_set_pauseparam(struct net_device *dev,
1932                                 struct ethtool_pauseparam *epause)
1933 {
1934         struct b44 *bp = netdev_priv(dev);
1935
1936         spin_lock_irq(&bp->lock);
1937         if (epause->autoneg)
1938                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1939         else
1940                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1941         if (epause->rx_pause)
1942                 bp->flags |= B44_FLAG_RX_PAUSE;
1943         else
1944                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1945         if (epause->tx_pause)
1946                 bp->flags |= B44_FLAG_TX_PAUSE;
1947         else
1948                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1949         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1950                 b44_halt(bp);
1951                 b44_init_rings(bp);
1952                 b44_init_hw(bp, B44_FULL_RESET);
1953         } else {
1954                 __b44_set_flow_ctrl(bp, bp->flags);
1955         }
1956         spin_unlock_irq(&bp->lock);
1957
1958         b44_enable_ints(bp);
1959
1960         return 0;
1961 }
1962
1963 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1964 {
1965         switch (stringset) {
1966         case ETH_SS_STATS:
1967                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1968                 break;
1969         }
1970 }
1971
1972 static int b44_get_stats_count(struct net_device *dev)
1973 {
1974         return ARRAY_SIZE(b44_gstrings);
1975 }
1976
1977 static void b44_get_ethtool_stats(struct net_device *dev,
1978                                   struct ethtool_stats *stats, u64 *data)
1979 {
1980         struct b44 *bp = netdev_priv(dev);
1981         u32 *val = &bp->hw_stats.tx_good_octets;
1982         u32 i;
1983
1984         spin_lock_irq(&bp->lock);
1985
1986         b44_stats_update(bp);
1987
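        /* b44_hw_stats is laid out as consecutive u32 counters in the
         * same order as b44_gstrings, so it can be walked as an array
         * starting at tx_good_octets.
         */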
1988         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1989                 *data++ = *val++;
1990
1991         spin_unlock_irq(&bp->lock);
1992 }
1993
1994 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1995 {
1996         struct b44 *bp = netdev_priv(dev);
1997
1998         wol->supported = WAKE_MAGIC;
1999         if (bp->flags & B44_FLAG_WOL_ENABLE)
2000                 wol->wolopts = WAKE_MAGIC;
2001         else
2002                 wol->wolopts = 0;
2003         memset(&wol->sopass, 0, sizeof(wol->sopass));
2004 }
2005
2006 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2007 {
2008         struct b44 *bp = netdev_priv(dev);
2009
2010         spin_lock_irq(&bp->lock);
2011         if (wol->wolopts & WAKE_MAGIC)
2012                 bp->flags |= B44_FLAG_WOL_ENABLE;
2013         else
2014                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2015         spin_unlock_irq(&bp->lock);
2016
2017         return 0;
2018 }
2019
2020 static const struct ethtool_ops b44_ethtool_ops = {
2021         .get_drvinfo            = b44_get_drvinfo,
2022         .get_settings           = b44_get_settings,
2023         .set_settings           = b44_set_settings,
2024         .nway_reset             = b44_nway_reset,
2025         .get_link               = ethtool_op_get_link,
2026         .get_wol                = b44_get_wol,
2027         .set_wol                = b44_set_wol,
2028         .get_ringparam          = b44_get_ringparam,
2029         .set_ringparam          = b44_set_ringparam,
2030         .get_pauseparam         = b44_get_pauseparam,
2031         .set_pauseparam         = b44_set_pauseparam,
2032         .get_msglevel           = b44_get_msglevel,
2033         .set_msglevel           = b44_set_msglevel,
2034         .get_strings            = b44_get_strings,
2035         .get_stats_count        = b44_get_stats_count,
2036         .get_ethtool_stats      = b44_get_ethtool_stats,
2037         .get_perm_addr          = ethtool_op_get_perm_addr,
2038 };
2039
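/* Hand MII register ioctls (SIOCGMIIPHY and friends) to the generic
 * MII layer, under the device lock.  The interface must be up for the
 * PHY to be accessible.
 */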
2040 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2041 {
2042         struct mii_ioctl_data *data = if_mii(ifr);
2043         struct b44 *bp = netdev_priv(dev);
2044         int err = -EINVAL;
2045
2046         if (!netif_running(dev))
2047                 goto out;
2048
2049         spin_lock_irq(&bp->lock);
2050         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2051         spin_unlock_irq(&bp->lock);
2052 out:
2053         return err;
2054 }
2055
2056 /* Read 128 bytes of EEPROM, mapped at offset 4096 of the register window. */
2057 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2058 {
2059         long i;
2060         __le16 *ptr = (__le16 *) data;
2061
2062         for (i = 0; i < 128; i += 2)
2063                 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2064
2065         return 0;
2066 }
2067
2068 static int __devinit b44_get_invariants(struct b44 *bp)
2069 {
2070         u8 eeprom[128];
2071         int err;
2072
2073         err = b44_read_eeprom(bp, &eeprom[0]);
2074         if (err)
2075                 goto out;
2076
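        /* The EEPROM stores the MAC address as three little-endian
         * 16-bit words, hence the swapped byte pairs below.
         */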
2077         bp->dev->dev_addr[0] = eeprom[79];
2078         bp->dev->dev_addr[1] = eeprom[78];
2079         bp->dev->dev_addr[2] = eeprom[81];
2080         bp->dev->dev_addr[3] = eeprom[80];
2081         bp->dev->dev_addr[4] = eeprom[83];
2082         bp->dev->dev_addr[5] = eeprom[82];
2083
2084         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2085                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2086                 return -EINVAL;
2087         }
2088
2089         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2090
2091         bp->phy_addr = eeprom[90] & 0x1f;
2092
2093         /* With this, plus the rx_header prepended to the data by the
2094          * hardware, we'll land the ethernet header on a 2-byte boundary.
2095          */
2096         bp->rx_offset = 30;
2097
2098         bp->imask = IMASK_DEF;
2099
2100         bp->core_unit = ssb_core_unit(bp);
2101         bp->dma_offset = SB_PCI_DMA;
2102
2103         /* XXX - really required?
2104            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2105          */
2106
2107         if (ssb_get_core_rev(bp) >= 7)
2108                 bp->flags |= B44_FLAG_B0_ANDLATER;
2109
2110 out:
2111         return err;
2112 }
2113
2114 static int __devinit b44_init_one(struct pci_dev *pdev,
2115                                   const struct pci_device_id *ent)
2116 {
2117         static int b44_version_printed = 0;
2118         unsigned long b44reg_base, b44reg_len;
2119         struct net_device *dev;
2120         struct b44 *bp;
2121         int err, i;
2122
2123         if (b44_version_printed++ == 0)
2124                 printk(KERN_INFO "%s", version);
2125
2126         err = pci_enable_device(pdev);
2127         if (err) {
2128                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2129                        "aborting.\n");
2130                 return err;
2131         }
2132
2133         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2134                 dev_err(&pdev->dev,
2135                         "Cannot find proper PCI device "
2136                        "base address, aborting.\n");
2137                 err = -ENODEV;
2138                 goto err_out_disable_pdev;
2139         }
2140
2141         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2142         if (err) {
2143                 dev_err(&pdev->dev,
2144                         "Cannot obtain PCI resources, aborting.\n");
2145                 goto err_out_disable_pdev;
2146         }
2147
2148         pci_set_master(pdev);
2149
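        /* The 4400 can only generate 30-bit (1 GB) DMA addresses, so both
         * streaming and consistent mappings must fit below that limit;
         * buffers that land above it are retried or copied through bounce
         * buffers elsewhere in this driver.
         */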
2150         err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2151         if (err) {
2152                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2153                 goto err_out_free_res;
2154         }
2155
2156         err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2157         if (err) {
2158                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2159                 goto err_out_free_res;
2160         }
2161
2162         b44reg_base = pci_resource_start(pdev, 0);
2163         b44reg_len = pci_resource_len(pdev, 0);
2164
2165         dev = alloc_etherdev(sizeof(*bp));
2166         if (!dev) {
2167                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2168                 err = -ENOMEM;
2169                 goto err_out_free_res;
2170         }
2171
2172         SET_MODULE_OWNER(dev);
2173         SET_NETDEV_DEV(dev, &pdev->dev);
2174
2175         /* No interesting netdevice features in this card... */
2176         dev->features |= 0;
2177
2178         bp = netdev_priv(dev);
2179         bp->pdev = pdev;
2180         bp->dev = dev;
2181
2182         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2183
2184         spin_lock_init(&bp->lock);
2185
2186         bp->regs = ioremap(b44reg_base, b44reg_len);
2187         if (bp->regs == NULL) {
2188                 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2189                 err = -ENOMEM;
2190                 goto err_out_free_dev;
2191         }
2192
2193         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2194         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2195
2196         dev->open = b44_open;
2197         dev->stop = b44_close;
2198         dev->hard_start_xmit = b44_start_xmit;
2199         dev->get_stats = b44_get_stats;
2200         dev->set_multicast_list = b44_set_rx_mode;
2201         dev->set_mac_address = b44_set_mac_addr;
2202         dev->do_ioctl = b44_ioctl;
2203         dev->tx_timeout = b44_tx_timeout;
2204         dev->poll = b44_poll;
2205         dev->weight = 64;
2206         dev->watchdog_timeo = B44_TX_TIMEOUT;
2207 #ifdef CONFIG_NET_POLL_CONTROLLER
2208         dev->poll_controller = b44_poll_controller;
2209 #endif
2210         dev->change_mtu = b44_change_mtu;
2211         dev->irq = pdev->irq;
2212         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2213
2214         netif_carrier_off(dev);
2215
2216         err = b44_get_invariants(bp);
2217         if (err) {
2218                 dev_err(&pdev->dev,
2219                         "Problem fetching invariants of chip, aborting.\n");
2220                 goto err_out_iounmap;
2221         }
2222
2223         bp->mii_if.dev = dev;
2224         bp->mii_if.mdio_read = b44_mii_read;
2225         bp->mii_if.mdio_write = b44_mii_write;
2226         bp->mii_if.phy_id = bp->phy_addr;
2227         bp->mii_if.phy_id_mask = 0x1f;
2228         bp->mii_if.reg_num_mask = 0x1f;
2229
2230         /* By default, advertise all speed/duplex settings. */
2231         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2232                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2233
2234         /* By default, auto-negotiate PAUSE. */
2235         bp->flags |= B44_FLAG_PAUSE_AUTO;
2236
2237         err = register_netdev(dev);
2238         if (err) {
2239                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2240                 goto err_out_iounmap;
2241         }
2242
2243         pci_set_drvdata(pdev, dev);
2244
2245         pci_save_state(bp->pdev);
2246
2247         /* Chip reset provides power to the b44 MAC & PCI cores, which
2248          * is necessary for MAC register access.
2249          */
2250         b44_chip_reset(bp);
2251
2252         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2253         for (i = 0; i < 6; i++)
2254                 printk("%2.2x%c", dev->dev_addr[i],
2255                        i == 5 ? '\n' : ':');
2256
2257         return 0;
2258
2259 err_out_iounmap:
2260         iounmap(bp->regs);
2261
2262 err_out_free_dev:
2263         free_netdev(dev);
2264
2265 err_out_free_res:
2266         pci_release_regions(pdev);
2267
2268 err_out_disable_pdev:
2269         pci_disable_device(pdev);
2270         pci_set_drvdata(pdev, NULL);
2271         return err;
2272 }
2273
2274 static void __devexit b44_remove_one(struct pci_dev *pdev)
2275 {
2276         struct net_device *dev = pci_get_drvdata(pdev);
2277         struct b44 *bp = netdev_priv(dev);
2278
2279         unregister_netdev(dev);
2280         iounmap(bp->regs);
2281         free_netdev(dev);
2282         pci_release_regions(pdev);
2283         pci_disable_device(pdev);
2284         pci_set_drvdata(pdev, NULL);
2285 }
2286
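/* Power management: halt the device on suspend and, if WOL is enabled,
 * bring just enough of the chip back up (B44_PARTIAL_RESET) to run the
 * wakeup pattern matcher while the system sleeps.
 */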
2287 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2288 {
2289         struct net_device *dev = pci_get_drvdata(pdev);
2290         struct b44 *bp = netdev_priv(dev);
2291
2292         if (!netif_running(dev))
2293                 return 0;
2294
2295         del_timer_sync(&bp->timer);
2296
2297         spin_lock_irq(&bp->lock);
2298
2299         b44_halt(bp);
2300         netif_carrier_off(bp->dev);
2301         netif_device_detach(bp->dev);
2302         b44_free_rings(bp);
2303
2304         spin_unlock_irq(&bp->lock);
2305
2306         free_irq(dev->irq, dev);
2307         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2308                 b44_init_hw(bp, B44_PARTIAL_RESET);
2309                 b44_setup_wol(bp);
2310         }
2311         pci_disable_device(pdev);
2312         return 0;
2313 }
2314
2315 static int b44_resume(struct pci_dev *pdev)
2316 {
2317         struct net_device *dev = pci_get_drvdata(pdev);
2318         struct b44 *bp = netdev_priv(dev);
2319         int rc = 0;
2320
2321         pci_restore_state(pdev);
2322         rc = pci_enable_device(pdev);
2323         if (rc) {
2324                 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2325                         dev->name);
2326                 return rc;
2327         }
2328
2329         pci_set_master(pdev);
2330
2331         if (!netif_running(dev))
2332                 return 0;
2333
2334         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2335         if (rc) {
2336                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2337                 pci_disable_device(pdev);
2338                 return rc;
2339         }
2340
2341         spin_lock_irq(&bp->lock);
2342
2343         b44_init_rings(bp);
2344         b44_init_hw(bp, B44_FULL_RESET);
2345         netif_device_attach(bp->dev);
2346         spin_unlock_irq(&bp->lock);
2347
2348         b44_enable_ints(bp);
2349         netif_wake_queue(dev);
2350
2351         mod_timer(&bp->timer, jiffies + 1);
2352
2353         return 0;
2354 }
2355
2356 static struct pci_driver b44_driver = {
2357         .name           = DRV_MODULE_NAME,
2358         .id_table       = b44_pci_tbl,
2359         .probe          = b44_init_one,
2360         .remove         = __devexit_p(b44_remove_one),
2361         .suspend        = b44_suspend,
2362         .resume         = b44_resume,
2363 };
2364
2365 static int __init b44_init(void)
2366 {
2367         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2368
2369         /* Set up parameters for syncing RX/TX DMA descriptors */
2370         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2371         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2372
2373         return pci_register_driver(&b44_driver);
2374 }
2375
2376 static void __exit b44_cleanup(void)
2377 {
2378         pci_unregister_driver(&b44_driver);
2379 }
2380
2381 module_init(b44_init);
2382 module_exit(b44_cleanup);
2383