1 /* b44.c: Broadcom 4400 device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2006 Broadcom Corporation.
6  *
7  * Distribute under GPL.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
23
24 #include <asm/uaccess.h>
25 #include <asm/io.h>
26 #include <asm/irq.h>
27
28 #include "b44.h"
29
30 #define DRV_MODULE_NAME         "b44"
31 #define PFX DRV_MODULE_NAME     ": "
32 #define DRV_MODULE_VERSION      "1.00"
33 #define DRV_MODULE_RELDATE      "Apr 7, 2006"
34
35 #define B44_DEF_MSG_ENABLE        \
36         (NETIF_MSG_DRV          | \
37          NETIF_MSG_PROBE        | \
38          NETIF_MSG_LINK         | \
39          NETIF_MSG_TIMER        | \
40          NETIF_MSG_IFDOWN       | \
41          NETIF_MSG_IFUP         | \
42          NETIF_MSG_RX_ERR       | \
43          NETIF_MSG_TX_ERR)
44
45 /* length of time before we decide the hardware is borked,
46  * and dev->tx_timeout() should be called to fix the problem
47  */
48 #define B44_TX_TIMEOUT                  (5 * HZ)
49
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU                     60
52 #define B44_MAX_MTU                     1500
53
54 #define B44_RX_RING_SIZE                512
55 #define B44_DEF_RX_RING_PENDING         200
56 #define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
57                                  B44_RX_RING_SIZE)
58 #define B44_TX_RING_SIZE                512
59 #define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
61                                  B44_TX_RING_SIZE)
62 #define B44_DMA_MASK 0x3fffffff
63
64 #define TX_RING_GAP(BP) \
65         (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP)                                              \
67         (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
68           (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
69           (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
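/* Worked example of the ring accounting above (editorial illustration):
 * with B44_TX_RING_SIZE = 512 and tx_pending = 511, TX_RING_GAP() is 1,
 * so one slot always stays empty to distinguish "full" from "empty":
 *
 *   tx_cons == tx_prod == 10       ->  10 + 511 - 10 = 511 free (empty)
 *   tx_cons == 10, tx_prod == 9    ->  10 - 9 - 1    =   0 free (full)
 *
 * NEXT_TX() relies on B44_TX_RING_SIZE being a power of two.  Note also
 * that RX_PKT_BUF_SZ below expands to an expression using a local 'bp',
 * so it can only be used where one is in scope.
 */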
71
72 #define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)
74
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
77
78 static char version[] __devinitdata =
79         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
80
81 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83 MODULE_LICENSE("GPL");
84 MODULE_VERSION(DRV_MODULE_VERSION);
85
86 static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
87 module_param(b44_debug, int, 0);
88 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
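/* Usage sketch (not part of the original source): b44_debug takes a
 * NETIF_MSG_* bitmask, so the driver can be loaded with, say, only probe
 * and link messages enabled (NETIF_MSG_PROBE | NETIF_MSG_LINK == 0x0006):
 *
 *   modprobe b44 b44_debug=0x0006
 *
 * Leaving it at -1 selects B44_DEF_MSG_ENABLE above.
 */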
89
90 static struct pci_device_id b44_pci_tbl[] = {
91         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
92           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
93         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
94           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
95         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
96           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
97         { }     /* terminate list with empty entry */
98 };
99
100 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
101
102 static void b44_halt(struct b44 *);
103 static void b44_init_rings(struct b44 *);
104 static void b44_init_hw(struct b44 *, int);
105
106 static int dma_desc_align_mask;
107 static int dma_desc_sync_size;
108
109 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
110 #define _B44(x...)      # x,
111 B44_STAT_REG_DECLARE
112 #undef _B44
113 };
114
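/* The two helpers below sync a single descriptor when the rings live in
 * streaming DMA memory (the *_RING_HACK fallback in b44_alloc_consistent).
 * The offset is rounded down to a DMA alignment boundary and
 * dma_desc_sync_size bytes are synced from there; both globals are
 * assumed to be set up at module init (not shown in this excerpt).
 */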
115 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
116                                                 dma_addr_t dma_base,
117                                                 unsigned long offset,
118                                                 enum dma_data_direction dir)
119 {
120         dma_sync_single_range_for_device(&pdev->dev, dma_base,
121                                          offset & dma_desc_align_mask,
122                                          dma_desc_sync_size, dir);
123 }
124
125 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
126                                              dma_addr_t dma_base,
127                                              unsigned long offset,
128                                              enum dma_data_direction dir)
129 {
130         dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
131                                       offset & dma_desc_align_mask,
132                                       dma_desc_sync_size, dir);
133 }
134
135 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
136 {
137         return readl(bp->regs + reg);
138 }
139
140 static inline void bw32(const struct b44 *bp,
141                         unsigned long reg, unsigned long val)
142 {
143         writel(val, bp->regs + reg);
144 }
145
146 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
147                         u32 bit, unsigned long timeout, const int clear)
148 {
149         unsigned long i;
150
151         for (i = 0; i < timeout; i++) {
152                 u32 val = br32(bp, reg);
153
154                 if (clear && !(val & bit))
155                         break;
156                 if (!clear && (val & bit))
157                         break;
158                 udelay(10);
159         }
160         if (i == timeout) {
161                 printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
162                        "%lx to %s.\n",
163                        bp->dev->name,
164                        bit, reg,
165                        (clear ? "clear" : "set"));
166                 return -ENODEV;
167         }
168         return 0;
169 }
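/* Usage sketch (illustrative only): b44_wait_bit() polls a register every
 * 10us for up to 'timeout' iterations.  For example,
 *
 *   err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
 *
 * waits up to ~1ms for the MII-done bit to become set; passing clear == 1
 * instead waits for the bit to become clear.
 */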
170
171 /* Sonics SiliconBackplane support routines.  ROFL, you should see all the
172  * buzz words used on this company's website :-)
173  *
174  * All of these routines must be invoked with bp->lock held and
175  * interrupts disabled.
176  */
177
178 #define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
179 #define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */
180
181 static u32 ssb_get_core_rev(struct b44 *bp)
182 {
183         return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
184 }
185
186 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
187 {
188         u32 bar_orig, pci_rev, val;
189
190         pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
191         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
192         pci_rev = ssb_get_core_rev(bp);
193
194         val = br32(bp, B44_SBINTVEC);
195         val |= cores;
196         bw32(bp, B44_SBINTVEC, val);
197
198         val = br32(bp, SSB_PCI_TRANS_2);
199         val |= SSB_PCI_PREF | SSB_PCI_BURST;
200         bw32(bp, SSB_PCI_TRANS_2, val);
201
202         pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
203
204         return pci_rev;
205 }
206
207 static void ssb_core_disable(struct b44 *bp)
208 {
209         if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
210                 return;
211
212         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
213         b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
214         b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
215         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
216                             SBTMSLOW_REJECT | SBTMSLOW_RESET));
217         br32(bp, B44_SBTMSLOW);
218         udelay(1);
219         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
220         br32(bp, B44_SBTMSLOW);
221         udelay(1);
222 }
223
224 static void ssb_core_reset(struct b44 *bp)
225 {
226         u32 val;
227
228         ssb_core_disable(bp);
229         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
230         br32(bp, B44_SBTMSLOW);
231         udelay(1);
232
233         /* Clear SERR if set, this is a hw bug workaround.  */
234         if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
235                 bw32(bp, B44_SBTMSHIGH, 0);
236
237         val = br32(bp, B44_SBIMSTATE);
238         if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
239                 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
240
241         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
242         br32(bp, B44_SBTMSLOW);
243         udelay(1);
244
245         bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
246         br32(bp, B44_SBTMSLOW);
247         udelay(1);
248 }
249
250 static int ssb_core_unit(struct b44 *bp)
251 {
252 #if 0
253         u32 val = br32(bp, B44_SBADMATCH0);
254         u32 base;
255
256         type = val & SBADMATCH0_TYPE_MASK;
257         switch (type) {
258         case 0:
259                 base = val & SBADMATCH0_BS0_MASK;
260                 break;
261
262         case 1:
263                 base = val & SBADMATCH0_BS1_MASK;
264                 break;
265
266         case 2:
267         default:
268                 base = val & SBADMATCH0_BS2_MASK;
269                 break;
270         }
271 #endif
272         return 0;
273 }
274
275 static int ssb_is_core_up(struct b44 *bp)
276 {
277         return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
278                 == SBTMSLOW_CLOCK);
279 }
280
281 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
282 {
283         u32 val;
284
285         val  = ((u32) data[2]) << 24;
286         val |= ((u32) data[3]) << 16;
287         val |= ((u32) data[4]) <<  8;
288         val |= ((u32) data[5]) <<  0;
289         bw32(bp, B44_CAM_DATA_LO, val);
290         val = (CAM_DATA_HI_VALID |
291                (((u32) data[0]) << 8) |
292                (((u32) data[1]) << 0));
293         bw32(bp, B44_CAM_DATA_HI, val);
294         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
295                             (index << CAM_CTRL_INDEX_SHIFT)));
296         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
297 }
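/* CAM word layout implied by the packing above (editorial sketch): for
 * the MAC address 00:11:22:33:44:55,
 *
 *   B44_CAM_DATA_LO = 0x22334455                    (bytes 2..5)
 *   B44_CAM_DATA_HI = CAM_DATA_HI_VALID | 0x0011    (bytes 0..1)
 *
 * i.e. the address spans the HI/LO pair big-endian, with a valid bit in
 * the high word.
 */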
298
299 static inline void __b44_disable_ints(struct b44 *bp)
300 {
301         bw32(bp, B44_IMASK, 0);
302 }
303
304 static void b44_disable_ints(struct b44 *bp)
305 {
306         __b44_disable_ints(bp);
307
308         /* Flush posted writes. */
309         br32(bp, B44_IMASK);
310 }
311
312 static void b44_enable_ints(struct b44 *bp)
313 {
314         bw32(bp, B44_IMASK, bp->imask);
315 }
316
317 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
318 {
319         int err;
320
321         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
322         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
323                              (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
324                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
325                              (reg << MDIO_DATA_RA_SHIFT) |
326                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
327         err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
328         *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
329
330         return err;
331 }
332
333 static int b44_writephy(struct b44 *bp, int reg, u32 val)
334 {
335         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337                              (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
338                              (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339                              (reg << MDIO_DATA_RA_SHIFT) |
340                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
341                              (val & MDIO_DATA_DATA)));
342         return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
343 }
344
345 /* miilib interface */
346 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
347  * due to code existing before miilib use was added to this driver.
348  * Someone should remove this artificial driver limitation in
349  * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
350  */
351 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
352 {
353         u32 val;
354         struct b44 *bp = netdev_priv(dev);
355         int rc = b44_readphy(bp, location, &val);
356         if (rc)
357                 return 0xffffffff;
358         return val;
359 }
360
361 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
362                          int val)
363 {
364         struct b44 *bp = netdev_priv(dev);
365         b44_writephy(bp, location, val);
366 }
367
368 static int b44_phy_reset(struct b44 *bp)
369 {
370         u32 val;
371         int err;
372
373         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
374         if (err)
375                 return err;
376         udelay(100);
377         err = b44_readphy(bp, MII_BMCR, &val);
378         if (!err) {
379                 if (val & BMCR_RESET) {
380                         printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
381                                bp->dev->name);
382                         err = -ENODEV;
383                 }
384         }
385
386         return err;
387 }
388
389 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
390 {
391         u32 val;
392
393         bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
394         bp->flags |= pause_flags;
395
396         val = br32(bp, B44_RXCONFIG);
397         if (pause_flags & B44_FLAG_RX_PAUSE)
398                 val |= RXCONFIG_FLOW;
399         else
400                 val &= ~RXCONFIG_FLOW;
401         bw32(bp, B44_RXCONFIG, val);
402
403         val = br32(bp, B44_MAC_FLOW);
404         if (pause_flags & B44_FLAG_TX_PAUSE)
405                 val |= (MAC_FLOW_PAUSE_ENAB |
406                         (0xc0 & MAC_FLOW_RX_HI_WATER));
407         else
408                 val &= ~MAC_FLOW_PAUSE_ENAB;
409         bw32(bp, B44_MAC_FLOW, val);
410 }
411
412 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
413 {
414         u32 pause_enab = 0;
415
416         /* The driver supports only rx pause by default because
417            the b44 MAC tx pause mechanism generates excessive
418            pause frames.
419            Use ethtool to turn on b44 tx pause if necessary.
420          */
421         if ((local & ADVERTISE_PAUSE_CAP) &&
422             (local & ADVERTISE_PAUSE_ASYM)){
423                 if ((remote & LPA_PAUSE_ASYM) &&
424                     !(remote & LPA_PAUSE_CAP))
425                         pause_enab |= B44_FLAG_RX_PAUSE;
426         }
427
428         __b44_set_flow_ctrl(bp, pause_enab);
429 }
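/* Resolution table for the logic above (editorial sketch).  With the
 * local side advertising PAUSE_CAP | PAUSE_ASYM, rx-only pause is enabled
 * exactly when the partner advertises asymmetric but not symmetric pause:
 *
 *   remote LPA_PAUSE_CAP   remote LPA_PAUSE_ASYM   result
 *          0                      0                none
 *          0                      1                RX pause
 *          1                      0                none
 *          1                      1                none
 *
 * Symmetric-pause partners get nothing here because enabling tx pause on
 * this MAC is deliberately avoided, per the comment in the function.
 */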
430
431 static int b44_setup_phy(struct b44 *bp)
432 {
433         u32 val;
434         int err;
435
436         if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
437                 goto out;
438         if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
439                                 val & MII_ALEDCTRL_ALLMSK)) != 0)
440                 goto out;
441         if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
442                 goto out;
443         if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
444                                 val | MII_TLEDCTRL_ENABLE)) != 0)
445                 goto out;
446
447         if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
448                 u32 adv = ADVERTISE_CSMA;
449
450                 if (bp->flags & B44_FLAG_ADV_10HALF)
451                         adv |= ADVERTISE_10HALF;
452                 if (bp->flags & B44_FLAG_ADV_10FULL)
453                         adv |= ADVERTISE_10FULL;
454                 if (bp->flags & B44_FLAG_ADV_100HALF)
455                         adv |= ADVERTISE_100HALF;
456                 if (bp->flags & B44_FLAG_ADV_100FULL)
457                         adv |= ADVERTISE_100FULL;
458
459                 if (bp->flags & B44_FLAG_PAUSE_AUTO)
460                         adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
461
462                 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
463                         goto out;
464                 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
465                                                        BMCR_ANRESTART))) != 0)
466                         goto out;
467         } else {
468                 u32 bmcr;
469
470                 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
471                         goto out;
472                 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
473                 if (bp->flags & B44_FLAG_100_BASE_T)
474                         bmcr |= BMCR_SPEED100;
475                 if (bp->flags & B44_FLAG_FULL_DUPLEX)
476                         bmcr |= BMCR_FULLDPLX;
477                 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
478                         goto out;
479
480                 /* Since we will not be negotiating there is no safe way
481                  * to determine if the link partner supports flow control
482                  * or not.  So just disable it completely in this case.
483                  */
484                 b44_set_flow_ctrl(bp, 0, 0);
485         }
486
487 out:
488         return err;
489 }
490
491 static void b44_stats_update(struct b44 *bp)
492 {
493         unsigned long reg;
494         u32 *val;
495
496         val = &bp->hw_stats.tx_good_octets;
497         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
498                 *val++ += br32(bp, reg);
499         }
500
501         /* Pad (documentation only; the next loop re-seeds reg) */
502         reg += 8*4UL;
503
504         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
505                 *val++ += br32(bp, reg);
506         }
507 }
508
509 static void b44_link_report(struct b44 *bp)
510 {
511         if (!netif_carrier_ok(bp->dev)) {
512                 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
513         } else {
514                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
515                        bp->dev->name,
516                        (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
517                        (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
518
519                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
520                        "%s for RX.\n",
521                        bp->dev->name,
522                        (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
523                        (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
524         }
525 }
526
527 static void b44_check_phy(struct b44 *bp)
528 {
529         u32 bmsr, aux;
530
531         if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
532             !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
533             (bmsr != 0xffff)) {
534                 if (aux & MII_AUXCTRL_SPEED)
535                         bp->flags |= B44_FLAG_100_BASE_T;
536                 else
537                         bp->flags &= ~B44_FLAG_100_BASE_T;
538                 if (aux & MII_AUXCTRL_DUPLEX)
539                         bp->flags |= B44_FLAG_FULL_DUPLEX;
540                 else
541                         bp->flags &= ~B44_FLAG_FULL_DUPLEX;
542
543                 if (!netif_carrier_ok(bp->dev) &&
544                     (bmsr & BMSR_LSTATUS)) {
545                         u32 val = br32(bp, B44_TX_CTRL);
546                         u32 local_adv, remote_adv;
547
548                         if (bp->flags & B44_FLAG_FULL_DUPLEX)
549                                 val |= TX_CTRL_DUPLEX;
550                         else
551                                 val &= ~TX_CTRL_DUPLEX;
552                         bw32(bp, B44_TX_CTRL, val);
553
554                         if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
555                             !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
556                             !b44_readphy(bp, MII_LPA, &remote_adv))
557                                 b44_set_flow_ctrl(bp, local_adv, remote_adv);
558
559                         /* Link now up */
560                         netif_carrier_on(bp->dev);
561                         b44_link_report(bp);
562                 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
563                         /* Link now down */
564                         netif_carrier_off(bp->dev);
565                         b44_link_report(bp);
566                 }
567
568                 if (bmsr & BMSR_RFAULT)
569                         printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
570                                bp->dev->name);
571                 if (bmsr & BMSR_JCD)
572                         printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
573                                bp->dev->name);
574         }
575 }
576
577 static void b44_timer(unsigned long __opaque)
578 {
579         struct b44 *bp = (struct b44 *) __opaque;
580
581         spin_lock_irq(&bp->lock);
582
583         b44_check_phy(bp);
584
585         b44_stats_update(bp);
586
587         spin_unlock_irq(&bp->lock);
588
589         bp->timer.expires = jiffies + HZ;
590         add_timer(&bp->timer);
591 }
592
593 static void b44_tx(struct b44 *bp)
594 {
595         u32 cur, cons;
596
597         cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
598         cur /= sizeof(struct dma_desc);
599
600         /* XXX needs updating when NETIF_F_SG is supported */
601         for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
602                 struct ring_info *rp = &bp->tx_buffers[cons];
603                 struct sk_buff *skb = rp->skb;
604
605                 BUG_ON(skb == NULL);
606
607                 pci_unmap_single(bp->pdev,
608                                  pci_unmap_addr(rp, mapping),
609                                  skb->len,
610                                  PCI_DMA_TODEVICE);
611                 rp->skb = NULL;
612                 dev_kfree_skb_irq(skb);
613         }
614
615         bp->tx_cons = cons;
616         if (netif_queue_stopped(bp->dev) &&
617             TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
618                 netif_wake_queue(bp->dev);
619
620         bw32(bp, B44_GPTIMER, 0);
621 }
622
623 /* Works like this.  This chip writes a 'struct rx_header' 30 bytes
624  * before the DMA address you give it.  So we allocate 30 more bytes
625  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626  * point the chip at 30 bytes past where the rx_header will go.
627  */
628 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
629 {
630         struct dma_desc *dp;
631         struct ring_info *src_map, *map;
632         struct rx_header *rh;
633         struct sk_buff *skb;
634         dma_addr_t mapping;
635         int dest_idx;
636         u32 ctrl;
637
638         src_map = NULL;
639         if (src_idx >= 0)
640                 src_map = &bp->rx_buffers[src_idx];
641         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
642         map = &bp->rx_buffers[dest_idx];
643         skb = dev_alloc_skb(RX_PKT_BUF_SZ);
644         if (skb == NULL)
645                 return -ENOMEM;
646
647         mapping = pci_map_single(bp->pdev, skb->data,
648                                  RX_PKT_BUF_SZ,
649                                  PCI_DMA_FROMDEVICE);
650
651         /* Hardware bug work-around, the chip is unable to do PCI DMA
652            to/from anything above 1GB :-( */
653         if (dma_mapping_error(mapping) ||
654                 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
655                 /* Sigh... */
656                 if (!dma_mapping_error(mapping))
657                         pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
658                 dev_kfree_skb_any(skb);
659                 skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
660                 if (skb == NULL)
661                         return -ENOMEM;
662                 mapping = pci_map_single(bp->pdev, skb->data,
663                                          RX_PKT_BUF_SZ,
664                                          PCI_DMA_FROMDEVICE);
665                 if (dma_mapping_error(mapping) ||
666                         mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
667                         if (!dma_mapping_error(mapping))
668                                 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
669                         dev_kfree_skb_any(skb);
670                         return -ENOMEM;
671                 }
672         }
673
674         skb->dev = bp->dev;
675         skb_reserve(skb, bp->rx_offset);
676
677         rh = (struct rx_header *)
678                 (skb->data - bp->rx_offset);
679         rh->len = 0;
680         rh->flags = 0;
681
682         map->skb = skb;
683         pci_unmap_addr_set(map, mapping, mapping);
684
685         if (src_map != NULL)
686                 src_map->skb = NULL;
687
688         ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
689         if (dest_idx == (B44_RX_RING_SIZE - 1))
690                 ctrl |= DESC_CTRL_EOT;
691
692         dp = &bp->rx_ring[dest_idx];
693         dp->ctrl = cpu_to_le32(ctrl);
694         dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
695
696         if (bp->flags & B44_FLAG_RX_RING_HACK)
697                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
698                                              dest_idx * sizeof(*dp),
699                                              DMA_BIDIRECTIONAL);
700
701         return RX_PKT_BUF_SZ;
702 }
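/* Resulting buffer layout (editorial sketch, using the 30-byte offset
 * described in the comment above the function):
 *
 *   mapping                        mapping + rx_offset (given to the chip)
 *   |<------ struct rx_header ------>|<------- received frame ------->|
 *   rh = skb->data - rx_offset        skb->data (after skb_reserve)
 *
 * dp->addr carries the +rx_offset (plus the bp->dma_offset bus
 * translation), so the header the chip writes lands in the reserved
 * headroom instead of clobbering packet data.
 */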
703
704 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
705 {
706         struct dma_desc *src_desc, *dest_desc;
707         struct ring_info *src_map, *dest_map;
708         struct rx_header *rh;
709         int dest_idx;
710         u32 ctrl;
711
712         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
713         dest_desc = &bp->rx_ring[dest_idx];
714         dest_map = &bp->rx_buffers[dest_idx];
715         src_desc = &bp->rx_ring[src_idx];
716         src_map = &bp->rx_buffers[src_idx];
717
718         dest_map->skb = src_map->skb;
719         rh = (struct rx_header *) src_map->skb->data;
720         rh->len = 0;
721         rh->flags = 0;
722         pci_unmap_addr_set(dest_map, mapping,
723                            pci_unmap_addr(src_map, mapping));
724
725         if (bp->flags & B44_FLAG_RX_RING_HACK)
726                 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
727                                           src_idx * sizeof(*src_desc),
728                                           DMA_BIDIRECTIONAL);
729
730         ctrl = src_desc->ctrl;
731         if (dest_idx == (B44_RX_RING_SIZE - 1))
732                 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
733         else
734                 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
735
736         dest_desc->ctrl = ctrl;
737         dest_desc->addr = src_desc->addr;
738
739         src_map->skb = NULL;
740
741         if (bp->flags & B44_FLAG_RX_RING_HACK)
742                 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
743                                              dest_idx * sizeof(*dest_desc),
744                                              DMA_BIDIRECTIONAL);
745
746         pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
747                                        RX_PKT_BUF_SZ,
748                                        PCI_DMA_FROMDEVICE);
749 }
750
751 static int b44_rx(struct b44 *bp, int budget)
752 {
753         int received;
754         u32 cons, prod;
755
756         received = 0;
757         prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
758         prod /= sizeof(struct dma_desc);
759         cons = bp->rx_cons;
760
761         while (cons != prod && budget > 0) {
762                 struct ring_info *rp = &bp->rx_buffers[cons];
763                 struct sk_buff *skb = rp->skb;
764                 dma_addr_t map = pci_unmap_addr(rp, mapping);
765                 struct rx_header *rh;
766                 u16 len;
767
768                 pci_dma_sync_single_for_cpu(bp->pdev, map,
769                                             RX_PKT_BUF_SZ,
770                                             PCI_DMA_FROMDEVICE);
771                 rh = (struct rx_header *) skb->data;
772                 len = le16_to_cpu(rh->len);
773                 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
774                     (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
775                 drop_it:
776                         b44_recycle_rx(bp, cons, bp->rx_prod);
777                 drop_it_no_recycle:
778                         bp->stats.rx_dropped++;
779                         goto next_pkt;
780                 }
781
782                 if (len == 0) {
783                         int i = 0;
784
785                         do {
786                                 udelay(2);
787                                 barrier();
788                                 len = le16_to_cpu(rh->len);
789                         } while (len == 0 && i++ < 5);
790                         if (len == 0)
791                                 goto drop_it;
792                 }
793
794                 /* Omit CRC. */
795                 len -= 4;
796
797                 if (len > RX_COPY_THRESHOLD) {
798                         int skb_size;
799                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
800                         if (skb_size < 0)
801                                 goto drop_it;
802                         pci_unmap_single(bp->pdev, map,
803                                          skb_size, PCI_DMA_FROMDEVICE);
804                         /* Leave out rx_header */
805                         skb_put(skb, len + bp->rx_offset);
806                         skb_pull(skb, bp->rx_offset);
807                 } else {
808                         struct sk_buff *copy_skb;
809
810                         b44_recycle_rx(bp, cons, bp->rx_prod);
811                         copy_skb = dev_alloc_skb(len + 2);
812                         if (copy_skb == NULL)
813                                 goto drop_it_no_recycle;
814
815                         copy_skb->dev = bp->dev;
816                         skb_reserve(copy_skb, 2);
817                         skb_put(copy_skb, len);
818                         /* DMA sync done above, copy just the actual packet */
819                         memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
820
821                         skb = copy_skb;
822                 }
823                 skb->ip_summed = CHECKSUM_NONE;
824                 skb->protocol = eth_type_trans(skb, bp->dev);
825                 netif_receive_skb(skb);
826                 bp->dev->last_rx = jiffies;
827                 received++;
828                 budget--;
829         next_pkt:
830                 bp->rx_prod = (bp->rx_prod + 1) &
831                         (B44_RX_RING_SIZE - 1);
832                 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
833         }
834
835         bp->rx_cons = cons;
836         bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
837
838         return received;
839 }
840
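/* Old-style NAPI contract (editorial note): *budget is the global quota
 * and netdev->quota the per-device one; both are decremented by the work
 * done.  Returning 0 means "done, interrupts re-enabled below", returning
 * 1 means "poll me again".
 */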
841 static int b44_poll(struct net_device *netdev, int *budget)
842 {
843         struct b44 *bp = netdev_priv(netdev);
844         int done;
845
846         spin_lock_irq(&bp->lock);
847
848         if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
849                 /* spin_lock(&bp->tx_lock); */
850                 b44_tx(bp);
851                 /* spin_unlock(&bp->tx_lock); */
852         }
853         spin_unlock_irq(&bp->lock);
854
855         done = 1;
856         if (bp->istat & ISTAT_RX) {
857                 int orig_budget = *budget;
858                 int work_done;
859
860                 if (orig_budget > netdev->quota)
861                         orig_budget = netdev->quota;
862
863                 work_done = b44_rx(bp, orig_budget);
864
865                 *budget -= work_done;
866                 netdev->quota -= work_done;
867
868                 if (work_done >= orig_budget)
869                         done = 0;
870         }
871
872         if (bp->istat & ISTAT_ERRORS) {
873                 spin_lock_irq(&bp->lock);
874                 b44_halt(bp);
875                 b44_init_rings(bp);
876                 b44_init_hw(bp, 1);
877                 netif_wake_queue(bp->dev);
878                 spin_unlock_irq(&bp->lock);
879                 done = 1;
880         }
881
882         if (done) {
883                 netif_rx_complete(netdev);
884                 b44_enable_ints(bp);
885         }
886
887         return (done ? 0 : 1);
888 }
889
890 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
891 {
892         struct net_device *dev = dev_id;
893         struct b44 *bp = netdev_priv(dev);
894         u32 istat, imask;
895         int handled = 0;
896
897         spin_lock(&bp->lock);
898
899         istat = br32(bp, B44_ISTAT);
900         imask = br32(bp, B44_IMASK);
901
902         /* ??? What is the purpose of the interrupt mask
903          * ??? register if we have to mask it out by hand anyway?
904          */
905         istat &= imask;
906         if (istat) {
907                 handled = 1;
908
909                 if (unlikely(!netif_running(dev))) {
910                         printk(KERN_INFO PFX "%s: late interrupt.\n", dev->name);
911                         goto irq_ack;
912                 }
913
914                 if (netif_rx_schedule_prep(dev)) {
915                         /* NOTE: These writes are posted by the readback of
916                          *       the ISTAT register below.
917                          */
918                         bp->istat = istat;
919                         __b44_disable_ints(bp);
920                         __netif_rx_schedule(dev);
921                 } else {
922                         printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
923                                dev->name);
924                 }
925
926 irq_ack:
927                 bw32(bp, B44_ISTAT, istat);
928                 br32(bp, B44_ISTAT);
929         }
930         spin_unlock(&bp->lock);
931         return IRQ_RETVAL(handled);
932 }
933
934 static void b44_tx_timeout(struct net_device *dev)
935 {
936         struct b44 *bp = netdev_priv(dev);
937
938         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
939                dev->name);
940
941         spin_lock_irq(&bp->lock);
942
943         b44_halt(bp);
944         b44_init_rings(bp);
945         b44_init_hw(bp, 1);
946
947         spin_unlock_irq(&bp->lock);
948
949         b44_enable_ints(bp);
950
951         netif_wake_queue(dev);
952 }
953
954 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
955 {
956         struct b44 *bp = netdev_priv(dev);
957         struct sk_buff *bounce_skb;
958         int rc = NETDEV_TX_OK;
959         dma_addr_t mapping;
960         u32 len, entry, ctrl;
961
962         len = skb->len;
963         spin_lock_irq(&bp->lock);
964
965         /* This is a hard error, log it. */
966         if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
967                 netif_stop_queue(dev);
968                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
969                        dev->name);
970                 goto err_out;
971         }
972
973         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
974         if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
975                 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
976                 if (!dma_mapping_error(mapping))
977                         pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
978
979                 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
980                                              GFP_ATOMIC|GFP_DMA);
981                 if (!bounce_skb)
982                         goto err_out;
983
984                 mapping = pci_map_single(bp->pdev, bounce_skb->data,
985                                          len, PCI_DMA_TODEVICE);
986                 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
987                         if (!dma_mapping_error(mapping))
988                                 pci_unmap_single(bp->pdev, mapping,
989                                          len, PCI_DMA_TODEVICE);
990                         dev_kfree_skb_any(bounce_skb);
991                         goto err_out;
992                 }
993
994                 memcpy(skb_put(bounce_skb, len), skb->data, len);
995                 dev_kfree_skb_any(skb);
996                 skb = bounce_skb;
997         }
998
999         entry = bp->tx_prod;
1000         bp->tx_buffers[entry].skb = skb;
1001         pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1002
1003         ctrl  = (len & DESC_CTRL_LEN);
1004         ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1005         if (entry == (B44_TX_RING_SIZE - 1))
1006                 ctrl |= DESC_CTRL_EOT;
1007
1008         bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1009         bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);
1010
1011         if (bp->flags & B44_FLAG_TX_RING_HACK)
1012                 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1013                                              entry * sizeof(bp->tx_ring[0]),
1014                                              DMA_TO_DEVICE);
1015
1016         entry = NEXT_TX(entry);
1017
1018         bp->tx_prod = entry;
1019
1020         wmb();
1021
1022         bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1023         if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1024                 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1025         if (bp->flags & B44_FLAG_REORDER_BUG)
1026                 br32(bp, B44_DMATX_PTR);
1027
1028         if (TX_BUFFS_AVAIL(bp) < 1)
1029                 netif_stop_queue(dev);
1030
1031         dev->trans_start = jiffies;
1032
1033 out_unlock:
1034         spin_unlock_irq(&bp->lock);
1035
1036         return rc;
1037
1038 err_out:
1039         rc = NETDEV_TX_BUSY;
1040         goto out_unlock;
1041 }
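/* Bounce-buffer arithmetic above (editorial illustration): B44_DMA_MASK
 * is 0x3fffffff, so a mapping is usable only if mapping + len stays at or
 * below the 1GB boundary.  E.g. mapping == 0x3fffff00 with len == 0x200
 * gives 0x40000100, which fails the "mapping + len > B44_DMA_MASK" test
 * and forces the copy into a GFP_DMA bounce skb.
 */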
1042
1043 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1044 {
1045         struct b44 *bp = netdev_priv(dev);
1046
1047         if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1048                 return -EINVAL;
1049
1050         if (!netif_running(dev)) {
1051                 /* We'll just catch it later when the
1052                  * device is brought up.
1053                  */
1054                 dev->mtu = new_mtu;
1055                 return 0;
1056         }
1057
1058         spin_lock_irq(&bp->lock);
1059         b44_halt(bp);
1060         dev->mtu = new_mtu;
1061         b44_init_rings(bp);
1062         b44_init_hw(bp, 1);
1063         spin_unlock_irq(&bp->lock);
1064
1065         b44_enable_ints(bp);
1066
1067         return 0;
1068 }
1069
1070 /* Free up pending packets in all rx/tx rings.
1071  *
1072  * The chip has been shut down and the driver detached from
1073  * the networking stack, so no interrupts or new tx packets will
1074  * end up in the driver.  bp->lock is not held and we are not
1075  * in an interrupt context and thus may sleep.
1076  */
1077 static void b44_free_rings(struct b44 *bp)
1078 {
1079         struct ring_info *rp;
1080         int i;
1081
1082         for (i = 0; i < B44_RX_RING_SIZE; i++) {
1083                 rp = &bp->rx_buffers[i];
1084
1085                 if (rp->skb == NULL)
1086                         continue;
1087                 pci_unmap_single(bp->pdev,
1088                                  pci_unmap_addr(rp, mapping),
1089                                  RX_PKT_BUF_SZ,
1090                                  PCI_DMA_FROMDEVICE);
1091                 dev_kfree_skb_any(rp->skb);
1092                 rp->skb = NULL;
1093         }
1094
1095         /* XXX needs changes once NETIF_F_SG is set... */
1096         for (i = 0; i < B44_TX_RING_SIZE; i++) {
1097                 rp = &bp->tx_buffers[i];
1098
1099                 if (rp->skb == NULL)
1100                         continue;
1101                 pci_unmap_single(bp->pdev,
1102                                  pci_unmap_addr(rp, mapping),
1103                                  rp->skb->len,
1104                                  PCI_DMA_TODEVICE);
1105                 dev_kfree_skb_any(rp->skb);
1106                 rp->skb = NULL;
1107         }
1108 }
1109
1110 /* Initialize tx/rx rings for packet processing.
1111  *
1112  * The chip has been shut down and the driver detached from
1113  * the networking stack, so no interrupts or new tx packets will
1114  * end up in the driver.
1115  */
1116 static void b44_init_rings(struct b44 *bp)
1117 {
1118         int i;
1119
1120         b44_free_rings(bp);
1121
1122         memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1123         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1124
1125         if (bp->flags & B44_FLAG_RX_RING_HACK)
1126                 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1127                                            DMA_TABLE_BYTES,
1128                                            PCI_DMA_BIDIRECTIONAL);
1129
1130         if (bp->flags & B44_FLAG_TX_RING_HACK)
1131                 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1132                                            DMA_TABLE_BYTES,
1133                                            PCI_DMA_TODEVICE);
1134
1135         for (i = 0; i < bp->rx_pending; i++) {
1136                 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1137                         break;
1138         }
1139 }
1140
1141 /*
1142  * Must only be invoked with interrupt sources disabled and
1143  * the hardware shut down.
1144  */
1145 static void b44_free_consistent(struct b44 *bp)
1146 {
1147         kfree(bp->rx_buffers);
1148         bp->rx_buffers = NULL;
1149         kfree(bp->tx_buffers);
1150         bp->tx_buffers = NULL;
1151         if (bp->rx_ring) {
1152                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1153                         dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1154                                          DMA_TABLE_BYTES,
1155                                          DMA_BIDIRECTIONAL);
1156                         kfree(bp->rx_ring);
1157                 } else
1158                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1159                                             bp->rx_ring, bp->rx_ring_dma);
1160                 bp->rx_ring = NULL;
1161                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1162         }
1163         if (bp->tx_ring) {
1164                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1165                         dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1166                                          DMA_TABLE_BYTES,
1167                                          DMA_TO_DEVICE);
1168                         kfree(bp->tx_ring);
1169                 } else
1170                         pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1171                                             bp->tx_ring, bp->tx_ring_dma);
1172                 bp->tx_ring = NULL;
1173                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1174         }
1175 }
1176
1177 /*
1178  * Must only be invoked with interrupt sources disabled and
1179  * the hardware shut down.  Can sleep.
1180  */
1181 static int b44_alloc_consistent(struct b44 *bp)
1182 {
1183         int size;
1184
1185         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1186         bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1187         if (!bp->rx_buffers)
1188                 goto out_err;
1189
1190         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1191         bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1192         if (!bp->tx_buffers)
1193                 goto out_err;
1194
1195         size = DMA_TABLE_BYTES;
1196         bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1197         if (!bp->rx_ring) {
1198                 /* Allocation may have failed due to pci_alloc_consistent
1199                    insisting on use of GFP_DMA, which is more restrictive
1200                    than necessary...  */
1201                 struct dma_desc *rx_ring;
1202                 dma_addr_t rx_ring_dma;
1203
1204                 rx_ring = kzalloc(size, GFP_KERNEL);
1205                 if (!rx_ring)
1206                         goto out_err;
1207
1208                 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1209                                              DMA_TABLE_BYTES,
1210                                              DMA_BIDIRECTIONAL);
1211
1212                 if (dma_mapping_error(rx_ring_dma) ||
1213                         rx_ring_dma + size > B44_DMA_MASK) {
1214                         kfree(rx_ring);
1215                         goto out_err;
1216                 }
1217
1218                 bp->rx_ring = rx_ring;
1219                 bp->rx_ring_dma = rx_ring_dma;
1220                 bp->flags |= B44_FLAG_RX_RING_HACK;
1221         }
1222
1223         bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1224         if (!bp->tx_ring) {
1225                 /* Allocation may have failed due to pci_alloc_consistent
1226                    insisting on use of GFP_DMA, which is more restrictive
1227                    than necessary...  */
1228                 struct dma_desc *tx_ring;
1229                 dma_addr_t tx_ring_dma;
1230
1231                 tx_ring = kzalloc(size, GFP_KERNEL);
1232                 if (!tx_ring)
1233                         goto out_err;
1234
1235                 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1236                                              DMA_TABLE_BYTES,
1237                                              DMA_TO_DEVICE);
1238
1239                 if (dma_mapping_error(tx_ring_dma) ||
1240                         tx_ring_dma + size > B44_DMA_MASK) {
1241                         kfree(tx_ring);
1242                         goto out_err;
1243                 }
1244
1245                 bp->tx_ring = tx_ring;
1246                 bp->tx_ring_dma = tx_ring_dma;
1247                 bp->flags |= B44_FLAG_TX_RING_HACK;
1248         }
1249
1250         return 0;
1251
1252 out_err:
1253         b44_free_consistent(bp);
1254         return -ENOMEM;
1255 }
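/* Editorial summary of the fallback above: when pci_alloc_consistent()
 * fails (it may insist on scarce GFP_DMA memory), the ring is kzalloc'd
 * from regular memory and streaming-mapped instead, provided the mapping
 * itself lands below B44_DMA_MASK.  Coherency then becomes the driver's
 * job, which is why the *_RING_HACK flags route descriptor updates
 * through the dma_sync_* helpers.
 */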
1256
1257 /* bp->lock is held. */
1258 static void b44_clear_stats(struct b44 *bp)
1259 {
1260         unsigned long reg;
1261
1262         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1263         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1264                 br32(bp, reg);
1265         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1266                 br32(bp, reg);
1267 }
1268
1269 /* bp->lock is held. */
1270 static void b44_chip_reset(struct b44 *bp)
1271 {
1272         if (ssb_is_core_up(bp)) {
1273                 bw32(bp, B44_RCV_LAZY, 0);
1274                 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1275                 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1276                 bw32(bp, B44_DMATX_CTRL, 0);
1277                 bp->tx_prod = bp->tx_cons = 0;
1278                 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1279                         b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1280                                      100, 0);
1281                 }
1282                 bw32(bp, B44_DMARX_CTRL, 0);
1283                 bp->rx_prod = bp->rx_cons = 0;
1284         } else {
1285                 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1286                                    SBINTVEC_ENET0 :
1287                                    SBINTVEC_ENET1));
1288         }
1289
1290         ssb_core_reset(bp);
1291
1292         b44_clear_stats(bp);
1293
1294         /* Make PHY accessible. */
1295         bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1296                              (0x0d & MDIO_CTRL_MAXF_MASK)));
1297         br32(bp, B44_MDIO_CTRL);
1298
1299         if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1300                 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1301                 br32(bp, B44_ENET_CTRL);
1302                 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1303         } else {
1304                 u32 val = br32(bp, B44_DEVCTRL);
1305
1306                 if (val & DEVCTRL_EPR) {
1307                         bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1308                         br32(bp, B44_DEVCTRL);
1309                         udelay(100);
1310                 }
1311                 bp->flags |= B44_FLAG_INTERNAL_PHY;
1312         }
1313 }
1314
1315 /* bp->lock is held. */
1316 static void b44_halt(struct b44 *bp)
1317 {
1318         b44_disable_ints(bp);
1319         b44_chip_reset(bp);
1320 }
1321
1322 /* bp->lock is held. */
1323 static void __b44_set_mac_addr(struct b44 *bp)
1324 {
1325         bw32(bp, B44_CAM_CTRL, 0);
1326         if (!(bp->dev->flags & IFF_PROMISC)) {
1327                 u32 val;
1328
1329                 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1330                 val = br32(bp, B44_CAM_CTRL);
1331                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1332         }
1333 }
1334
1335 static int b44_set_mac_addr(struct net_device *dev, void *p)
1336 {
1337         struct b44 *bp = netdev_priv(dev);
1338         struct sockaddr *addr = p;
1339
1340         if (netif_running(dev))
1341                 return -EBUSY;
1342
1343         if (!is_valid_ether_addr(addr->sa_data))
1344                 return -EINVAL;
1345
1346         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1347
1348         spin_lock_irq(&bp->lock);
1349         __b44_set_mac_addr(bp);
1350         spin_unlock_irq(&bp->lock);
1351
1352         return 0;
1353 }
1354
1355 /* Called at device open time to get the chip ready for
1356  * packet processing.  Invoked with bp->lock held.
1357  */
1358 static void __b44_set_rx_mode(struct net_device *);
1359 static void b44_init_hw(struct b44 *bp, int full_reset)
1360 {
1361         u32 val;
1362
1363         b44_chip_reset(bp);
1364         if (full_reset) {
1365                 b44_phy_reset(bp);
1366                 b44_setup_phy(bp);
1367         }
1368
1369         /* Enable CRC32, set proper LED modes and power on PHY */
1370         bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1371         bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1372
1373         /* This sets the MAC address too.  */
1374         __b44_set_rx_mode(bp->dev);
1375
1376         /* MTU + eth header + possible VLAN tag + struct rx_header */
1377         bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1378         bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1379
1380         bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1381         if (full_reset) {
1382                 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1383                 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1384                 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1385                                       (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1386                 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1387
1388                 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1389                 bp->rx_prod = bp->rx_pending;
1390
1391                 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1392         } else {
1393                 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1394                                       (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1395         }
1396
1397         val = br32(bp, B44_ENET_CTRL);
1398         bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1399 }
1400
1401 static int b44_open(struct net_device *dev)
1402 {
1403         struct b44 *bp = netdev_priv(dev);
1404         int err;
1405
1406         err = b44_alloc_consistent(bp);
1407         if (err)
1408                 goto out;
1409
1410         b44_init_rings(bp);
1411         b44_init_hw(bp, 1);
1412
1413         b44_check_phy(bp);
1414
1415         err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1416         if (unlikely(err < 0)) {
1417                 b44_chip_reset(bp);
1418                 b44_free_rings(bp);
1419                 b44_free_consistent(bp);
1420                 goto out;
1421         }
1422
1423         init_timer(&bp->timer);
1424         bp->timer.expires = jiffies + HZ;
1425         bp->timer.data = (unsigned long) bp;
1426         bp->timer.function = b44_timer;
1427         add_timer(&bp->timer);
1428
1429         b44_enable_ints(bp);
1430         netif_start_queue(dev);
1431 out:
1432         return err;
1433 }
1434
1435 #if 0
1436 /*static*/ void b44_dump_state(struct b44 *bp)
1437 {
1438         u32 val32, val32_2, val32_3, val32_4, val32_5;
1439         u16 val16;
1440
1441         pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1442         printk("DEBUG: PCI status [%04x]\n", val16);
1443
1444 }
1445 #endif
1446
1447 #ifdef CONFIG_NET_POLL_CONTROLLER
1448 /*
1449  * Polling receive - used by netconsole and other diagnostic tools
1450  * to allow network i/o with interrupts disabled.
1451  */
1452 static void b44_poll_controller(struct net_device *dev)
1453 {
1454         disable_irq(dev->irq);
1455         b44_interrupt(dev->irq, dev, NULL);
1456         enable_irq(dev->irq);
1457 }
1458 #endif
1459
1460
1461 static void b44_setup_wol(struct b44 *bp)
1462 {
1463         u32 val;
1464         u16 pmval;
1465
1466         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1467
1468         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1469
1470                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1471
1472                 val = bp->dev->dev_addr[2] << 24 |
1473                         bp->dev->dev_addr[3] << 16 |
1474                         bp->dev->dev_addr[4] << 8 |
1475                         bp->dev->dev_addr[5];
1476                 bw32(bp, B44_ADDR_LO, val);
1477
1478                 val = bp->dev->dev_addr[0] << 8 |
1479                         bp->dev->dev_addr[1];
1480                 bw32(bp, B44_ADDR_HI, val);
1481
1482                 val = br32(bp, B44_DEVCTRL);
1483                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1484
1485         }
1486
1487         val = br32(bp, B44_SBTMSLOW);
1488         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1489
1490         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1491         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1492
1493 }
1494
1495 static int b44_close(struct net_device *dev)
1496 {
1497         struct b44 *bp = netdev_priv(dev);
1498
1499         netif_stop_queue(dev);
1500
1501         netif_poll_disable(dev);
1502
1503         del_timer_sync(&bp->timer);
1504
1505         spin_lock_irq(&bp->lock);
1506
1507 #if 0
1508         b44_dump_state(bp);
1509 #endif
1510         b44_halt(bp);
1511         b44_free_rings(bp);
1512         netif_carrier_off(dev);
1513
1514         spin_unlock_irq(&bp->lock);
1515
1516         free_irq(dev->irq, dev);
1517
1518         netif_poll_enable(dev);
1519
1520         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1521                 b44_init_hw(bp, 0);
1522                 b44_setup_wol(bp);
1523         }
1524
1525         b44_free_consistent(bp);
1526
1527         return 0;
1528 }
1529
1530 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1531 {
1532         struct b44 *bp = netdev_priv(dev);
1533         struct net_device_stats *nstat = &bp->stats;
1534         struct b44_hw_stats *hwstat = &bp->hw_stats;
1535
1536         /* Convert HW stats into netdevice stats. */
1537         nstat->rx_packets = hwstat->rx_pkts;
1538         nstat->tx_packets = hwstat->tx_pkts;
1539         nstat->rx_bytes   = hwstat->rx_octets;
1540         nstat->tx_bytes   = hwstat->tx_octets;
1541         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1542                              hwstat->tx_oversize_pkts +
1543                              hwstat->tx_underruns +
1544                              hwstat->tx_excessive_cols +
1545                              hwstat->tx_late_cols);
1546         nstat->multicast  = hwstat->rx_multicast_pkts; /* counts received frames */
1547         nstat->collisions = hwstat->tx_total_cols;
1548
1549         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1550                                    hwstat->rx_undersize);
1551         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1552         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1553         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1554         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1555                                    hwstat->rx_oversize_pkts +
1556                                    hwstat->rx_missed_pkts +
1557                                    hwstat->rx_crc_align_errs +
1558                                    hwstat->rx_undersize +
1559                                    hwstat->rx_crc_errs +
1560                                    hwstat->rx_align_errs +
1561                                    hwstat->rx_symbol_errs);
1562
1563         nstat->tx_aborted_errors = hwstat->tx_underruns;
1564 #if 0
1565         /* Carrier lost counter seems to be broken for some devices */
1566         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1567 #endif
1568
1569         return nstat;
1570 }
1571
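     /* Load the multicast list into the CAM.  Slot 0 is reserved for the
      * unicast station address, so multicast entries start at slot 1;
      * the return value is the first CAM slot left unused.
      */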
1572 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1573 {
1574         struct dev_mc_list *mclist;
1575         int i, num_ents;
1576
1577         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1578         mclist = dev->mc_list;
1579         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1580                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1581         }
1582         return i + 1;
1583 }
1584
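     /* Caller must hold bp->lock.  Promiscuous mode bypasses the CAM;
      * otherwise the station address and up to B44_MCAST_TABLE_SIZE
      * multicast addresses are loaded, the remaining slots of the
      * 64-entry CAM are cleared, and the CAM is re-enabled.
      */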
1585 static void __b44_set_rx_mode(struct net_device *dev)
1586 {
1587         struct b44 *bp = netdev_priv(dev);
1588         u32 val;
1589
1590         val = br32(bp, B44_RXCONFIG);
1591         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1592         if (dev->flags & IFF_PROMISC) {
1593                 val |= RXCONFIG_PROMISC;
1594                 bw32(bp, B44_RXCONFIG, val);
1595         } else {
1596                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1597                 int i = 0;
1598
1599                 __b44_set_mac_addr(bp);
1600
1601                 if (dev->flags & IFF_ALLMULTI)
1602                         val |= RXCONFIG_ALLMULTI;
1603                 else
1604                         i = __b44_load_mcast(bp, dev);
1605
1606                 for (; i < 64; i++) {
1607                         __b44_cam_write(bp, zero, i);
1608                 }
1609                 bw32(bp, B44_RXCONFIG, val);
1610                 val = br32(bp, B44_CAM_CTRL);
1611                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1612         }
1613 }
1614
1615 static void b44_set_rx_mode(struct net_device *dev)
1616 {
1617         struct b44 *bp = netdev_priv(dev);
1618
1619         spin_lock_irq(&bp->lock);
1620         __b44_set_rx_mode(dev);
1621         spin_unlock_irq(&bp->lock);
1622 }
1623
1624 static u32 b44_get_msglevel(struct net_device *dev)
1625 {
1626         struct b44 *bp = netdev_priv(dev);
1627         return bp->msg_enable;
1628 }
1629
1630 static void b44_set_msglevel(struct net_device *dev, u32 value)
1631 {
1632         struct b44 *bp = netdev_priv(dev);
1633         bp->msg_enable = value;
1634 }
1635
1636 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1637 {
1638         struct b44 *bp = netdev_priv(dev);
1639         struct pci_dev *pci_dev = bp->pdev;
1640
1641         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1642         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1643         strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
1644 }
1645
1646 static int b44_nway_reset(struct net_device *dev)
1647 {
1648         struct b44 *bp = netdev_priv(dev);
1649         u32 bmcr;
1650         int r;
1651
1652         spin_lock_irq(&bp->lock);
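             /* BMCR is read twice here; presumably the second read returns
              * a settled value on PHYs that latch the first access.
              */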
1653         b44_readphy(bp, MII_BMCR, &bmcr);
1654         b44_readphy(bp, MII_BMCR, &bmcr);
1655         r = -EINVAL;
1656         if (bmcr & BMCR_ANENABLE) {
1657                 b44_writephy(bp, MII_BMCR,
1658                              bmcr | BMCR_ANRESTART);
1659                 r = 0;
1660         }
1661         spin_unlock_irq(&bp->lock);
1662
1663         return r;
1664 }
1665
1666 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1667 {
1668         struct b44 *bp = netdev_priv(dev);
1669
1670         cmd->supported = (SUPPORTED_Autoneg);
1671         cmd->supported |= (SUPPORTED_100baseT_Half |
1672                           SUPPORTED_100baseT_Full |
1673                           SUPPORTED_10baseT_Half |
1674                           SUPPORTED_10baseT_Full |
1675                           SUPPORTED_MII);
1676
1677         cmd->advertising = 0;
1678         if (bp->flags & B44_FLAG_ADV_10HALF)
1679                 cmd->advertising |= ADVERTISED_10baseT_Half;
1680         if (bp->flags & B44_FLAG_ADV_10FULL)
1681                 cmd->advertising |= ADVERTISED_10baseT_Full;
1682         if (bp->flags & B44_FLAG_ADV_100HALF)
1683                 cmd->advertising |= ADVERTISED_100baseT_Half;
1684         if (bp->flags & B44_FLAG_ADV_100FULL)
1685                 cmd->advertising |= ADVERTISED_100baseT_Full;
1686         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1687         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1688                 SPEED_100 : SPEED_10;
1689         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1690                 DUPLEX_FULL : DUPLEX_HALF;
1691         cmd->port = 0;
1692         cmd->phy_address = bp->phy_addr;
1693         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1694                 XCVR_INTERNAL : XCVR_EXTERNAL;
1695         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1696                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1697         if (cmd->autoneg == AUTONEG_ENABLE)
1698                 cmd->advertising |= ADVERTISED_Autoneg;
1699         if (!netif_running(dev)) {
1700                 cmd->speed = 0;
1701                 cmd->duplex = 0xff;
1702         }
1703         cmd->maxtxpkt = 0;
1704         cmd->maxrxpkt = 0;
1705         return 0;
1706 }
1707
1708 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1709 {
1710         struct b44 *bp = netdev_priv(dev);
1711
1712         /* We do not support gigabit. */
1713         if (cmd->autoneg == AUTONEG_ENABLE) {
1714                 if (cmd->advertising &
1715                     (ADVERTISED_1000baseT_Half |
1716                      ADVERTISED_1000baseT_Full))
1717                         return -EINVAL;
1718         } else if ((cmd->speed != SPEED_100 &&
1719                     cmd->speed != SPEED_10) ||
1720                    (cmd->duplex != DUPLEX_HALF &&
1721                     cmd->duplex != DUPLEX_FULL)) {
1722                 return -EINVAL;
1723         }
1724
1725         spin_lock_irq(&bp->lock);
1726
1727         if (cmd->autoneg == AUTONEG_ENABLE) {
1728                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1729                                B44_FLAG_100_BASE_T |
1730                                B44_FLAG_FULL_DUPLEX |
1731                                B44_FLAG_ADV_10HALF |
1732                                B44_FLAG_ADV_10FULL |
1733                                B44_FLAG_ADV_100HALF |
1734                                B44_FLAG_ADV_100FULL);
1735                 if (cmd->advertising == 0) {
1736                         bp->flags |= (B44_FLAG_ADV_10HALF |
1737                                       B44_FLAG_ADV_10FULL |
1738                                       B44_FLAG_ADV_100HALF |
1739                                       B44_FLAG_ADV_100FULL);
1740                 } else {
1741                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1742                                 bp->flags |= B44_FLAG_ADV_10HALF;
1743                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1744                                 bp->flags |= B44_FLAG_ADV_10FULL;
1745                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1746                                 bp->flags |= B44_FLAG_ADV_100HALF;
1747                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1748                                 bp->flags |= B44_FLAG_ADV_100FULL;
1749                 }
1750         } else {
1751                 bp->flags |= B44_FLAG_FORCE_LINK;
1752                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1753                 if (cmd->speed == SPEED_100)
1754                         bp->flags |= B44_FLAG_100_BASE_T;
1755                 if (cmd->duplex == DUPLEX_FULL)
1756                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1757         }
1758
1759         if (netif_running(dev))
1760                 b44_setup_phy(bp);
1761
1762         spin_unlock_irq(&bp->lock);
1763
1764         return 0;
1765 }
1766
1767 static void b44_get_ringparam(struct net_device *dev,
1768                               struct ethtool_ringparam *ering)
1769 {
1770         struct b44 *bp = netdev_priv(dev);
1771
1772         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1773         ering->rx_pending = bp->rx_pending;
1774
1775         ering->tx_max_pending = B44_TX_RING_SIZE - 1;
             ering->tx_pending = bp->tx_pending;
1776 }
1777
1778 static int b44_set_ringparam(struct net_device *dev,
1779                              struct ethtool_ringparam *ering)
1780 {
1781         struct b44 *bp = netdev_priv(dev);
1782
1783         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1784             (ering->rx_mini_pending != 0) ||
1785             (ering->rx_jumbo_pending != 0) ||
1786             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1787                 return -EINVAL;
1788
1789         spin_lock_irq(&bp->lock);
1790
1791         bp->rx_pending = ering->rx_pending;
1792         bp->tx_pending = ering->tx_pending;
1793
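             /* The new sizes only take effect when the rings are rebuilt,
              * so halt the device and re-initialize it here.
              */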
1794         b44_halt(bp);
1795         b44_init_rings(bp);
1796         b44_init_hw(bp, 1);
1797         netif_wake_queue(bp->dev);
1798         spin_unlock_irq(&bp->lock);
1799
1800         b44_enable_ints(bp);
1801
1802         return 0;
1803 }
1804
1805 static void b44_get_pauseparam(struct net_device *dev,
1806                                 struct ethtool_pauseparam *epause)
1807 {
1808         struct b44 *bp = netdev_priv(dev);
1809
1810         epause->autoneg =
1811                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1812         epause->rx_pause =
1813                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1814         epause->tx_pause =
1815                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1816 }
1817
1818 static int b44_set_pauseparam(struct net_device *dev,
1819                                 struct ethtool_pauseparam *epause)
1820 {
1821         struct b44 *bp = netdev_priv(dev);
1822
1823         spin_lock_irq(&bp->lock);
1824         if (epause->autoneg)
1825                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1826         else
1827                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1828         if (epause->rx_pause)
1829                 bp->flags |= B44_FLAG_RX_PAUSE;
1830         else
1831                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1832         if (epause->tx_pause)
1833                 bp->flags |= B44_FLAG_TX_PAUSE;
1834         else
1835                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1836         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1837                 b44_halt(bp);
1838                 b44_init_rings(bp);
1839                 b44_init_hw(bp, 1);
1840         } else {
1841                 __b44_set_flow_ctrl(bp, bp->flags);
1842         }
1843         spin_unlock_irq(&bp->lock);
1844
1845         b44_enable_ints(bp);
1846
1847         return 0;
1848 }
1849
1850 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1851 {
1852         switch (stringset) {
1853         case ETH_SS_STATS:
1854                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1855                 break;
1856         }
1857 }
1858
1859 static int b44_get_stats_count(struct net_device *dev)
1860 {
1861         return ARRAY_SIZE(b44_gstrings);
1862 }
1863
1864 static void b44_get_ethtool_stats(struct net_device *dev,
1865                                   struct ethtool_stats *stats, u64 *data)
1866 {
1867         struct b44 *bp = netdev_priv(dev);
1868         u32 *val = &bp->hw_stats.tx_good_octets;
1869         u32 i;
1870
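             /* bp->hw_stats is a contiguous block of u32 counters in the
              * same order as b44_gstrings, so one pointer walk copies
              * every exported statistic.
              */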
1871         spin_lock_irq(&bp->lock);
1872
1873         b44_stats_update(bp);
1874
1875         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1876                 *data++ = *val++;
1877
1878         spin_unlock_irq(&bp->lock);
1879 }
1880
1881 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1882 {
1883         struct b44 *bp = netdev_priv(dev);
1884
1885         wol->supported = WAKE_MAGIC;
1886         if (bp->flags & B44_FLAG_WOL_ENABLE)
1887                 wol->wolopts = WAKE_MAGIC;
1888         else
1889                 wol->wolopts = 0;
1890         memset(&wol->sopass, 0, sizeof(wol->sopass));
1891 }
1892
1893 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1894 {
1895         struct b44 *bp = netdev_priv(dev);
1896
1897         spin_lock_irq(&bp->lock);
1898         if (wol->wolopts & WAKE_MAGIC)
1899                 bp->flags |= B44_FLAG_WOL_ENABLE;
1900         else
1901                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
1902         spin_unlock_irq(&bp->lock);
1903
1904         return 0;
1905 }
1906
1907 static struct ethtool_ops b44_ethtool_ops = {
1908         .get_drvinfo            = b44_get_drvinfo,
1909         .get_settings           = b44_get_settings,
1910         .set_settings           = b44_set_settings,
1911         .nway_reset             = b44_nway_reset,
1912         .get_link               = ethtool_op_get_link,
1913         .get_wol                = b44_get_wol,
1914         .set_wol                = b44_set_wol,
1915         .get_ringparam          = b44_get_ringparam,
1916         .set_ringparam          = b44_set_ringparam,
1917         .get_pauseparam         = b44_get_pauseparam,
1918         .set_pauseparam         = b44_set_pauseparam,
1919         .get_msglevel           = b44_get_msglevel,
1920         .set_msglevel           = b44_set_msglevel,
1921         .get_strings            = b44_get_strings,
1922         .get_stats_count        = b44_get_stats_count,
1923         .get_ethtool_stats      = b44_get_ethtool_stats,
1924         .get_perm_addr          = ethtool_op_get_perm_addr,
1925 };
1926
1927 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1928 {
1929         struct mii_ioctl_data *data = if_mii(ifr);
1930         struct b44 *bp = netdev_priv(dev);
1931         int err = -EINVAL;
1932
1933         if (!netif_running(dev))
1934                 goto out;
1935
1936         spin_lock_irq(&bp->lock);
1937         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1938         spin_unlock_irq(&bp->lock);
1939 out:
1940         return err;
1941 }
1942
1943 /* Read the 128-byte EEPROM image exposed at offset 4096 of the register window. */
1944 static int b44_read_eeprom(struct b44 *bp, u8 *data)
1945 {
1946         long i;
1947         u16 *ptr = (u16 *) data;
1948
1949         for (i = 0; i < 128; i += 2)
1950                 ptr[i / 2] = readw(bp->regs + 4096 + i);
1951
1952         return 0;
1953 }
1954
1955 static int __devinit b44_get_invariants(struct b44 *bp)
1956 {
1957         u8 eeprom[128];
1958         int err;
1959
1960         err = b44_read_eeprom(bp, &eeprom[0]);
1961         if (err)
1962                 goto out;
1963
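             /* The EEPROM stores the station address as 16-bit words, so
              * the octets of each pair come out swapped; undo that here.
              */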
1964         bp->dev->dev_addr[0] = eeprom[79];
1965         bp->dev->dev_addr[1] = eeprom[78];
1966         bp->dev->dev_addr[2] = eeprom[81];
1967         bp->dev->dev_addr[3] = eeprom[80];
1968         bp->dev->dev_addr[4] = eeprom[83];
1969         bp->dev->dev_addr[5] = eeprom[82];
1970
1971         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
1972                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1973                 return -EINVAL;
1974         }
1975
1976         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1977
1978         bp->phy_addr = eeprom[90] & 0x1f;
1979
1980         /* With this, plus the rx_header prepended to the data by the
1981          * hardware, we'll land the ethernet header on a 2-byte boundary.
1982          */
1983         bp->rx_offset = 30;
1984
1985         bp->imask = IMASK_DEF;
1986
1987         bp->core_unit = ssb_core_unit(bp);
1988         bp->dma_offset = SB_PCI_DMA;
1989
1990         /* XXX - really required?
1991            bp->flags |= B44_FLAG_BUGGY_TXPTR;
1992          */
1993
1994         if (ssb_get_core_rev(bp) >= 7)
1995                 bp->flags |= B44_FLAG_B0_ANDLATER;
1996
1997 out:
1998         return err;
1999 }
2000
2001 static int __devinit b44_init_one(struct pci_dev *pdev,
2002                                   const struct pci_device_id *ent)
2003 {
2004         static int b44_version_printed = 0;
2005         unsigned long b44reg_base, b44reg_len;
2006         struct net_device *dev;
2007         struct b44 *bp;
2008         int err, i;
2009
2010         if (b44_version_printed++ == 0)
2011                 printk(KERN_INFO "%s", version);
2012
2013         err = pci_enable_device(pdev);
2014         if (err) {
2015                 printk(KERN_ERR PFX "Cannot enable PCI device, "
2016                        "aborting.\n");
2017                 return err;
2018         }
2019
2020         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2021                 printk(KERN_ERR PFX "Cannot find proper PCI device "
2022                        "base address, aborting.\n");
2023                 err = -ENODEV;
2024                 goto err_out_disable_pdev;
2025         }
2026
2027         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2028         if (err) {
2029                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
2030                        "aborting.\n");
2031                 goto err_out_disable_pdev;
2032         }
2033
2034         pci_set_master(pdev);
2035
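             /* The 4400 DMA engine can only address the low 1GB of the
              * bus (B44_DMA_MASK is a 30-bit mask), so both the streaming
              * and the coherent DMA masks must be restricted to it.
              */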
2036         err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2037         if (err) {
2038                 printk(KERN_ERR PFX "No usable DMA configuration, "
2039                        "aborting.\n");
2040                 goto err_out_free_res;
2041         }
2042
2043         err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2044         if (err) {
2045                 printk(KERN_ERR PFX "No usable DMA configuration, "
2046                        "aborting.\n");
2047                 goto err_out_free_res;
2048         }
2049
2050         b44reg_base = pci_resource_start(pdev, 0);
2051         b44reg_len = pci_resource_len(pdev, 0);
2052
2053         dev = alloc_etherdev(sizeof(*bp));
2054         if (!dev) {
2055                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
2056                 err = -ENOMEM;
2057                 goto err_out_free_res;
2058         }
2059
2060         SET_MODULE_OWNER(dev);
2061         SET_NETDEV_DEV(dev, &pdev->dev);
2062
2063         /* No interesting netdevice features in this card... */
2064         dev->features |= 0;
2065
2066         bp = netdev_priv(dev);
2067         bp->pdev = pdev;
2068         bp->dev = dev;
2069
2070         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2071
2072         spin_lock_init(&bp->lock);
2073
2074         bp->regs = ioremap(b44reg_base, b44reg_len);
2075         if (!bp->regs) {
2076                 printk(KERN_ERR PFX "Cannot map device registers, "
2077                        "aborting.\n");
2078                 err = -ENOMEM;
2079                 goto err_out_free_dev;
2080         }
2081
2082         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2083         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2084
2085         dev->open = b44_open;
2086         dev->stop = b44_close;
2087         dev->hard_start_xmit = b44_start_xmit;
2088         dev->get_stats = b44_get_stats;
2089         dev->set_multicast_list = b44_set_rx_mode;
2090         dev->set_mac_address = b44_set_mac_addr;
2091         dev->do_ioctl = b44_ioctl;
2092         dev->tx_timeout = b44_tx_timeout;
2093         dev->poll = b44_poll;
2094         dev->weight = 64;
2095         dev->watchdog_timeo = B44_TX_TIMEOUT;
2096 #ifdef CONFIG_NET_POLL_CONTROLLER
2097         dev->poll_controller = b44_poll_controller;
2098 #endif
2099         dev->change_mtu = b44_change_mtu;
2100         dev->irq = pdev->irq;
2101         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2102
2103         netif_carrier_off(dev);
2104
2105         err = b44_get_invariants(bp);
2106         if (err) {
2107                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2108                        "aborting.\n");
2109                 goto err_out_iounmap;
2110         }
2111
2112         bp->mii_if.dev = dev;
2113         bp->mii_if.mdio_read = b44_mii_read;
2114         bp->mii_if.mdio_write = b44_mii_write;
2115         bp->mii_if.phy_id = bp->phy_addr;
2116         bp->mii_if.phy_id_mask = 0x1f;
2117         bp->mii_if.reg_num_mask = 0x1f;
2118
2119         /* By default, advertise all speed/duplex settings. */
2120         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2121                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2122
2123         /* By default, auto-negotiate PAUSE. */
2124         bp->flags |= B44_FLAG_PAUSE_AUTO;
2125
2126         err = register_netdev(dev);
2127         if (err) {
2128                 printk(KERN_ERR PFX "Cannot register net device, "
2129                        "aborting.\n");
2130                 goto err_out_iounmap;
2131         }
2132
2133         pci_set_drvdata(pdev, dev);
2134
2135         pci_save_state(bp->pdev);
2136
2137         /* Chip reset provides power to the b44 MAC & PCI cores, which
2138          * is necessary for MAC register access.
2139          */
2140         b44_chip_reset(bp);
2141
2142         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2143         for (i = 0; i < 6; i++)
2144                 printk("%2.2x%c", dev->dev_addr[i],
2145                        i == 5 ? '\n' : ':');
2146
2147         return 0;
2148
2149 err_out_iounmap:
2150         iounmap(bp->regs);
2151
2152 err_out_free_dev:
2153         free_netdev(dev);
2154
2155 err_out_free_res:
2156         pci_release_regions(pdev);
2157
2158 err_out_disable_pdev:
2159         pci_disable_device(pdev);
2160         pci_set_drvdata(pdev, NULL);
2161         return err;
2162 }
2163
2164 static void __devexit b44_remove_one(struct pci_dev *pdev)
2165 {
2166         struct net_device *dev = pci_get_drvdata(pdev);
2167         struct b44 *bp = netdev_priv(dev);
2168
2169         unregister_netdev(dev);
2170         iounmap(bp->regs);
2171         free_netdev(dev);
2172         pci_release_regions(pdev);
2173         pci_disable_device(pdev);
2174         pci_set_drvdata(pdev, NULL);
2175 }
2176
2177 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2178 {
2179         struct net_device *dev = pci_get_drvdata(pdev);
2180         struct b44 *bp = netdev_priv(dev);
2181
2182         if (!netif_running(dev))
2183                  return 0;
2184
2185         del_timer_sync(&bp->timer);
2186
2187         spin_lock_irq(&bp->lock);
2188
2189         b44_halt(bp);
2190         netif_carrier_off(bp->dev);
2191         netif_device_detach(bp->dev);
2192         b44_free_rings(bp);
2193
2194         spin_unlock_irq(&bp->lock);
2195
2196         free_irq(dev->irq, dev);
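             /* With wake-on-LAN enabled, bring the core back up just far
              * enough to arm the wakeup logic before the device is
              * disabled.
              */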
2197         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2198                 b44_init_hw(bp, 0);
2199                 b44_setup_wol(bp);
2200         }
2201         pci_disable_device(pdev);
2202         return 0;
2203 }
2204
2205 static int b44_resume(struct pci_dev *pdev)
2206 {
2207         struct net_device *dev = pci_get_drvdata(pdev);
2208         struct b44 *bp = netdev_priv(dev);
2209
2210         pci_restore_state(pdev);
2211         pci_enable_device(pdev);
2212         pci_set_master(pdev);
2213
2214         if (!netif_running(dev))
2215                 return 0;
2216
2217         if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2218                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2219
2220         spin_lock_irq(&bp->lock);
2221
2222         b44_init_rings(bp);
2223         b44_init_hw(bp, 1);
2224         netif_device_attach(bp->dev);
2225         spin_unlock_irq(&bp->lock);
2226
2227         bp->timer.expires = jiffies + HZ;
2228         add_timer(&bp->timer);
2229
2230         b44_enable_ints(bp);
2231         netif_wake_queue(dev);
2232         return 0;
2233 }
2234
2235 static struct pci_driver b44_driver = {
2236         .name           = DRV_MODULE_NAME,
2237         .id_table       = b44_pci_tbl,
2238         .probe          = b44_init_one,
2239         .remove         = __devexit_p(b44_remove_one),
2240         .suspend        = b44_suspend,
2241         .resume         = b44_resume,
2242 };
2243
2244 static int __init b44_init(void)
2245 {
2246         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2247
2248         /* Set up parameters for syncing the RX/TX DMA descriptors */
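             /* ~(align - 1) only forms a contiguous mask when the
              * alignment is a power of two; dma_get_cache_alignment()
              * returns the cache line size, which is a power of two in
              * practice (e.g. a 32-byte line clears the low five bits).
              */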
2249         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2250         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2251
2252         return pci_module_init(&b44_driver);
2253 }
2254
2255 static void __exit b44_cleanup(void)
2256 {
2257         pci_unregister_driver(&b44_driver);
2258 }
2259
2260 module_init(b44_init);
2261 module_exit(b44_cleanup);
2262