X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fppp_generic.c;h=a32668e88e09fc2a157e3ac5500c94ae158c9529;hb=cb220c1af49644786944c549518b491d4c654030;hp=3b377f6cd4a0bff915d75d46cef3d66eb16cbf20;hpb=325a479c4c110db278ef3361460a48c4093252cc;p=linux-2.6-omap-h63xx.git

diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 3b377f6cd4a..a32668e88e0 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -273,7 +273,7 @@ static int ppp_connect_channel(struct channel *pch, int unit);
 static int ppp_disconnect_channel(struct channel *pch);
 static void ppp_destroy_channel(struct channel *pch);
 
-static struct class_simple *ppp_class;
+static struct class *ppp_class;
 
 /* Translates a PPP protocol number to a NP index (NP == network protocol) */
 static inline int proto_to_npindex(int proto)
@@ -858,12 +858,12 @@ static int __init ppp_init(void)
 	printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
 	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
 	if (!err) {
-		ppp_class = class_simple_create(THIS_MODULE, "ppp");
+		ppp_class = class_create(THIS_MODULE, "ppp");
 		if (IS_ERR(ppp_class)) {
 			err = PTR_ERR(ppp_class);
 			goto out_chrdev;
 		}
-		class_simple_device_add(ppp_class, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+		class_device_create(ppp_class, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
 		err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0),
 				S_IFCHR|S_IRUSR|S_IWUSR, "ppp");
 		if (err)
@@ -876,8 +876,8 @@ out:
 	return err;
 
 out_class:
-	class_simple_device_remove(MKDEV(PPP_MAJOR,0));
-	class_simple_destroy(ppp_class);
+	class_device_destroy(ppp_class, MKDEV(PPP_MAJOR,0));
+	class_destroy(ppp_class);
 out_chrdev:
 	unregister_chrdev(PPP_MAJOR, "ppp");
 	goto out;
@@ -1217,36 +1217,43 @@ ppp_push(struct ppp *ppp)
  */
 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 {
-	int nch, len, fragsize;
+	int len, fragsize;
 	int i, bits, hdrlen, mtu;
-	int flen, fnb;
+	int flen;
+	int navail, nfree;
+	int nbigger;
 	unsigned char *p, *q;
 	struct list_head *list;
 	struct channel *pch;
 	struct sk_buff *frag;
 	struct ppp_channel *chan;
 
-	nch = 0;
+	nfree = 0;	/* # channels which have no packet already queued */
+	navail = 0;	/* total # of usable channels (not deregistered) */
 	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
+	i = 0;
 	list = &ppp->channels;
 	while ((list = list->next) != &ppp->channels) {
 		pch = list_entry(list, struct channel, clist);
-		nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0);
-		/*
-		 * If a channel hasn't had a fragment yet, it has to get
-		 * one before we send any fragments on later channels.
-		 * If it can't take a fragment now, don't give any
-		 * to subsequent channels.
-		 */
-		if (!pch->had_frag && !pch->avail) {
-			while ((list = list->next) != &ppp->channels) {
-				pch = list_entry(list, struct channel, clist);
-				pch->avail = 0;
+		navail += pch->avail = (pch->chan != NULL);
+		if (pch->avail) {
+			if (skb_queue_empty(&pch->file.xq) ||
+			    !pch->had_frag) {
+				pch->avail = 2;
+				++nfree;
 			}
-			break;
+			if (!pch->had_frag && i < ppp->nxchan)
+				ppp->nxchan = i;
 		}
+		++i;
 	}
-	if (nch == 0)
+
+	/*
+	 * Don't start sending this packet unless at least half of
+	 * the channels are free.  This gives much better TCP
+	 * performance if we have a lot of channels.
+	 */
+	if (nfree == 0 || nfree < navail / 2)
 		return 0;	/* can't take now, leave it in xmit_pending */
 
 	/* Do protocol field compression (XXX this should be optional) */
@@ -1257,14 +1264,19 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		--len;
 	}
 
-	/* decide on fragment size */
+	/*
+	 * Decide on fragment size.
+	 * We create a fragment for each free channel regardless of
+	 * how small they are (i.e. even 0 length) in order to minimize
+	 * the time that it will take to detect when a channel drops
+	 * a fragment.
+	 */
 	fragsize = len;
-	if (nch > 1) {
-		int maxch = ROUNDUP(len, MIN_FRAG_SIZE);
-		if (nch > maxch)
-			nch = maxch;
-		fragsize = ROUNDUP(fragsize, nch);
-	}
+	if (nfree > 1)
+		fragsize = ROUNDUP(fragsize, nfree);
+	/* nbigger channels get fragsize bytes, the rest get fragsize-1,
+	   except if nbigger==0, then they all get fragsize. */
+	nbigger = len % nfree;
 
 	/* skip to the channel after the one we last used
 	   and start at that one */
@@ -1278,7 +1290,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 
 	/* create a fragment for each channel */
 	bits = B;
-	do {
+	while (nfree > 0 || len > 0) {
 		list = list->next;
 		if (list == &ppp->channels) {
 			i = 0;
@@ -1289,61 +1301,92 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		if (!pch->avail)
 			continue;
 
+		/*
+		 * Skip this channel if it has a fragment pending already and
+		 * we haven't given a fragment to all of the free channels.
+		 */
+		if (pch->avail == 1) {
+			if (nfree > 0)
+				continue;
+		} else {
+			--nfree;
+			pch->avail = 1;
+		}
+
 		/* check the channel's mtu and whether it is still attached. */
 		spin_lock_bh(&pch->downl);
-		if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) {
-			/* can't use this channel */
+		if (pch->chan == NULL) {
+			/* can't use this channel, it's being deregistered */
 			spin_unlock_bh(&pch->downl);
 			pch->avail = 0;
-			if (--nch == 0)
+			if (--navail == 0)
 				break;
 			continue;
 		}
 
 		/*
-		 * We have to create multiple fragments for this channel
-		 * if fragsize is greater than the channel's mtu.
+		 * Create a fragment for this channel of
+		 * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
+		 * If mtu+2-hdrlen < 4, that is a ridiculously small
+		 * MTU, so we use mtu = 2 + hdrlen.
 		 */
 		if (fragsize > len)
 			fragsize = len;
-		for (flen = fragsize; flen > 0; flen -= fnb) {
-			fnb = flen;
-			if (fnb > mtu + 2 - hdrlen)
-				fnb = mtu + 2 - hdrlen;
-			if (fnb >= len)
-				bits |= E;
-			frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC);
-			if (frag == 0)
-				goto noskb;
-			q = skb_put(frag, fnb + hdrlen);
-			/* make the MP header */
-			q[0] = PPP_MP >> 8;
-			q[1] = PPP_MP;
-			if (ppp->flags & SC_MP_XSHORTSEQ) {
-				q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
-				q[3] = ppp->nxseq;
-			} else {
-				q[2] = bits;
-				q[3] = ppp->nxseq >> 16;
-				q[4] = ppp->nxseq >> 8;
-				q[5] = ppp->nxseq;
-			}
-
-			/* copy the data in */
-			memcpy(q + hdrlen, p, fnb);
-
-			/* try to send it down the channel */
-			chan = pch->chan;
-			if (!chan->ops->start_xmit(chan, frag))
-				skb_queue_tail(&pch->file.xq, frag);
-			pch->had_frag = 1;
-			p += fnb;
-			len -= fnb;
-			++ppp->nxseq;
-			bits = 0;
+		flen = fragsize;
+		mtu = pch->chan->mtu + 2 - hdrlen;
+		if (mtu < 4)
+			mtu = 4;
+		if (flen > mtu)
+			flen = mtu;
+		if (flen == len && nfree == 0)
+			bits |= E;
+		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+		if (frag == 0)
+			goto noskb;
+		q = skb_put(frag, flen + hdrlen);
+
+		/* make the MP header */
+		q[0] = PPP_MP >> 8;
+		q[1] = PPP_MP;
+		if (ppp->flags & SC_MP_XSHORTSEQ) {
+			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+			q[3] = ppp->nxseq;
+		} else {
+			q[2] = bits;
+			q[3] = ppp->nxseq >> 16;
+			q[4] = ppp->nxseq >> 8;
+			q[5] = ppp->nxseq;
 		}
+
+		/*
+		 * Copy the data in.
+		 * Unfortunately there is a bug in older versions of
+		 * the Linux PPP multilink reconstruction code where it
+		 * drops 0-length fragments.  Therefore we make sure the
+		 * fragment has at least one byte of data.  Any bytes
+		 * we add in this situation will end up as padding on the
+		 * end of the reconstructed packet.
+		 */
+		if (flen == 0)
+			*skb_put(frag, 1) = 0;
+		else
+			memcpy(q + hdrlen, p, flen);
+
+		/* try to send it down the channel */
+		chan = pch->chan;
+		if (!skb_queue_empty(&pch->file.xq) ||
+		    !chan->ops->start_xmit(chan, frag))
+			skb_queue_tail(&pch->file.xq, frag);
+		pch->had_frag = 1;
+		p += flen;
+		len -= flen;
+		++ppp->nxseq;
+		bits = 0;
 		spin_unlock_bh(&pch->downl);
-	} while (len > 0);
+
+		if (--nbigger == 0 && fragsize > 0)
+			--fragsize;
+	}
 	ppp->nxchan = i;
 
 	return 1;
@@ -1369,7 +1412,7 @@ ppp_channel_push(struct channel *pch)
 
 	spin_lock_bh(&pch->downl);
 	if (pch->chan != 0) {
-		while (skb_queue_len(&pch->file.xq) > 0) {
+		while (!skb_queue_empty(&pch->file.xq)) {
 			skb = skb_dequeue(&pch->file.xq);
 			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
 				/* put the packet back and try again later */
@@ -1383,7 +1426,7 @@ ppp_channel_push(struct channel *pch)
 	}
 	spin_unlock_bh(&pch->downl);
 	/* see if there is anything from the attached unit to be sent */
-	if (skb_queue_len(&pch->file.xq) == 0) {
+	if (skb_queue_empty(&pch->file.xq)) {
 		read_lock_bh(&pch->upl);
 		ppp = pch->ppp;
 		if (ppp != 0)
@@ -1422,7 +1465,7 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
 		kfree_skb(skb);
 		return;
 	}
-	
+
 	proto = PPP_PROTO(skb);
 	read_lock_bh(&pch->upl);
 	if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) {
@@ -1691,7 +1734,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
 	struct list_head *l;
 	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
 
-	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
+	if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
 		goto err;		/* no good, throw it away */
 
 	/* Decode sequence number and begin/end bits */
@@ -2611,8 +2654,8 @@ static void __exit ppp_cleanup(void)
 	if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
 		printk(KERN_ERR "PPP: failed to unregister PPP device\n");
 	devfs_remove("ppp");
-	class_simple_device_remove(MKDEV(PPP_MAJOR, 0));
-	class_simple_destroy(ppp_class);
+	class_device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
+	class_destroy(ppp_class);
 }
 
 /*
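
For reference, the fragment-sizing arithmetic introduced in the new ppp_mp_explode() can be tried out in isolation. The sketch below is not part of the patch: it is a stand-alone user-space program that only reproduces the ROUNDUP()/nbigger bookkeeping from the hunks above (len bytes spread over nfree free channels, where the first len % nfree fragments are one byte bigger), and it deliberately ignores the per-channel MTU clamp, the MP header and the zero-length-fragment padding that the driver also handles. The values chosen for len and nfree are made up for illustration.

#include <stdio.h>

/* Same rounding helper that ppp_generic.c uses as ROUNDUP(). */
#define ROUNDUP(n, x) (((n) + (x) - 1) / (x))

int main(void)
{
	int len = 1400;		/* payload bytes to send (example value) */
	int nfree = 3;		/* channels with nothing queued (example value) */
	int fragsize, nbigger, flen, i;

	fragsize = len;
	if (nfree > 1)
		fragsize = ROUNDUP(fragsize, nfree);
	/* nbigger channels get fragsize bytes, the rest get fragsize-1,
	   except if nbigger==0, then they all get fragsize. */
	nbigger = len % nfree;

	for (i = 0; i < nfree && len > 0; i++) {
		flen = fragsize;
		if (flen > len)
			flen = len;
		printf("fragment %d: %d bytes\n", i, flen);
		len -= flen;
		/* after the first nbigger fragments, shrink the rest by one */
		if (--nbigger == 0 && fragsize > 0)
			--fragsize;
	}
	return 0;
}

With len = 1400 and nfree = 3 this prints fragments of 467, 467 and 466 bytes, which sum back to exactly 1400, matching the "nbigger channels get fragsize bytes, the rest get fragsize-1" comment in the patch.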