X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio_net.c;h=ad43421ab6f1cf49d0335df7b9ea6cb8f365285b;hb=5539ae9613587e4a4eec42d420b8bdd9ff552a65;hp=fdc23678117bdd31157a13c6d428d2c1951dc778;hpb=6585b4a71f523485ecf33e7f4569be4095d63699;p=linux-2.6-omap-h63xx.git
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fdc23678117..ad43421ab6f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -41,6 +41,9 @@ struct virtnet_info
 	struct net_device *dev;
 	struct napi_struct napi;
 
+	/* The skb we couldn't send because buffers were full. */
+	struct sk_buff *last_xmit_skb;
+
 	/* Number of input buffers, and max we've ever had. */
 	unsigned int num, max;
 
@@ -142,10 +145,10 @@ drop:
 static void try_fill_recv(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
-	struct scatterlist sg[1+MAX_SKB_FRAGS];
+	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err;
 
-	sg_init_table(sg, 1+MAX_SKB_FRAGS);
+	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
 		if (unlikely(!skb))
@@ -203,8 +206,11 @@ again:
 	if (received < budget) {
 		netif_rx_complete(vi->dev, napi);
 		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
-		    && netif_rx_reschedule(vi->dev, napi))
+		    && napi_schedule_prep(napi)) {
+			vi->rvq->vq_ops->disable_cb(vi->rvq);
+			__netif_rx_schedule(vi->dev, napi);
 			goto again;
+		}
 	}
 
 	return received;
@@ -218,24 +224,24 @@ static void free_old_xmit_skbs(struct virtnet_info *vi)
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
-		vi->dev->stats.tx_bytes += len;
+		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
 		kfree_skb(skb);
 	}
 }
 
-static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-	struct virtnet_info *vi = netdev_priv(dev);
-	int num, err;
-	struct scatterlist sg[1+MAX_SKB_FRAGS];
+	int num;
+	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	struct virtio_net_hdr *hdr;
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
-	DECLARE_MAC_BUF(mac);
 
-	sg_init_table(sg, 1+MAX_SKB_FRAGS);
+	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 
-	pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));
+	pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb,
+		 dest[0], dest[1], dest[2],
+		 dest[3], dest[4], dest[5]);
 
 	/* Encode metadata header at front. */
 	hdr = skb_vnet_hdr(skb);
@@ -268,31 +274,61 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	vnet_hdr_to_sg(sg, skb);
 	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-	__skb_queue_head(&vi->send, skb);
+
+	return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
+}
+
+static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
 
 again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
-	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
-	if (err) {
-		pr_debug("%s: virtio not prepared to send\n", dev->name);
-		netif_stop_queue(dev);
-
-		/* Activate callback for using skbs: if this fails it
-		 * means some were used in the meantime. */
-		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-			printk("Unlikely: restart svq failed\n");
-			netif_start_queue(dev);
-			goto again;
+
+	/* If we has a buffer left over from last time, send it now. */
+	if (vi->last_xmit_skb) {
+		if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
+			/* Drop this skb: we only queue one. */
+			vi->dev->stats.tx_dropped++;
+			kfree_skb(skb);
+			goto stop_queue;
 		}
-		__skb_unlink(skb, &vi->send);
+		vi->last_xmit_skb = NULL;
+	}
 
-		return NETDEV_TX_BUSY;
+	/* Put new one in send queue and do transmit */
+	__skb_queue_head(&vi->send, skb);
+	if (xmit_skb(vi, skb) != 0) {
+		vi->last_xmit_skb = skb;
+		goto stop_queue;
 	}
+done:
 	vi->svq->vq_ops->kick(vi->svq);
+	return NETDEV_TX_OK;
+
+stop_queue:
+	pr_debug("%s: virtio not prepared to send\n", dev->name);
+	netif_stop_queue(dev);
+
+	/* Activate callback for using skbs: if this returns false it
+	 * means some were used in the meantime. */
+	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+		vi->svq->vq_ops->disable_cb(vi->svq);
+		netif_start_queue(dev);
+		goto again;
+	}
+	goto done;
+}
 
-	return 0;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void virtnet_netpoll(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	napi_schedule(&vi->napi);
 }
+#endif
 
 static int virtnet_open(struct net_device *dev)
 {
@@ -336,6 +372,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 	dev->stop = virtnet_close;
 	dev->hard_start_xmit = start_xmit;
 	dev->features = NETIF_F_HIGHDMA;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = virtnet_netpoll;
+#endif
 	SET_NETDEV_DEV(dev, &vdev->dev);
 
 	/* Do we support "hardware" checksums? */
@@ -346,6 +385,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 			dev->features |= NETIF_F_TSO | NETIF_F_UFO
 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
 		}
+		/* Individual feature bits: what can host handle? */
+		if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_HOST_TSO4))
+			dev->features |= NETIF_F_TSO;
+		if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_HOST_TSO6))
+			dev->features |= NETIF_F_TSO6;
+		if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_HOST_ECN))
+			dev->features |= NETIF_F_TSO_ECN;
+		if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_HOST_UFO))
+			dev->features |= NETIF_F_UFO;
 	}
 
 	/* Configuration may specify what MAC to use. Otherwise random. */
@@ -361,6 +409,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
 	vi->dev = dev;
 	vi->vdev = vdev;
+	vdev->priv = vi;
 
 	/* We expect two virtqueues, receive then send. */
 	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
@@ -395,7 +444,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	pr_debug("virtnet: registered device %s\n", dev->name);
-	vdev->priv = vi;
 	return 0;
 
 unregister:
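
The largest hunk above reworks the transmit path: instead of returning NETDEV_TX_BUSY when the send virtqueue is full, start_xmit() now parks at most one unsent skb in vi->last_xmit_skb, stops the queue, and retries once the host has consumed buffers. The stand-alone C sketch below models only that bookkeeping against a toy fixed-size ring; the types and helpers (toy_ring, ring_add, reclaim, toy_start_xmit) are invented for illustration and are not virtio or kernel APIs, and the real driver's scatterlist setup, skb queueing and enable_cb/disable_cb race handling are left out.

/*
 * Toy model of the reworked transmit path: a fixed-size "virtqueue"
 * that can fill up, and a start_xmit()-style function that stashes at
 * most one unsent packet instead of reporting "busy" to its caller.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

struct toy_ring {
	int used;			/* descriptors currently in flight */
};

static struct toy_ring svq;
static int last_xmit_skb = -1;		/* -1 means no deferred packet */
static bool queue_stopped;

/* Analogue of add_buf(): fails with non-zero when the ring is full. */
static int ring_add(struct toy_ring *r, int pkt)
{
	if (r->used == RING_SIZE)
		return -1;
	r->used++;
	printf("queued packet %d (ring %d/%d)\n", pkt, r->used, RING_SIZE);
	return 0;
}

/* Analogue of free_old_xmit_skbs(): the host consumed n descriptors. */
static void reclaim(struct toy_ring *r, int n)
{
	while (n-- > 0 && r->used > 0)
		r->used--;
}

/* Mirrors the new start_xmit() flow: send any left-over packet first,
 * then the new one; on failure remember it and stop the queue. */
static int toy_start_xmit(int pkt)
{
	if (last_xmit_skb != -1) {
		if (ring_add(&svq, last_xmit_skb) != 0) {
			/* Drop the new packet: only one deferral is kept. */
			printf("dropped packet %d\n", pkt);
			goto stop;
		}
		last_xmit_skb = -1;
	}

	if (ring_add(&svq, pkt) != 0) {
		last_xmit_skb = pkt;	/* keep it for the next attempt */
		goto stop;
	}
	return 0;

stop:
	queue_stopped = true;
	printf("queue stopped\n");
	return 0;			/* never "busy", matching the patch */
}

int main(void)
{
	for (int pkt = 0; pkt < 6; pkt++)
		toy_start_xmit(pkt);

	/* Host finishes two packets; the driver would wake the queue here. */
	reclaim(&svq, 2);
	queue_stopped = false;
	toy_start_xmit(6);
	return 0;
}

Running the sketch queues packets 0-3, defers packet 4, drops packet 5 (only one deferral is kept), and sends the deferred packet plus packet 6 after the reclaim, which mirrors the intent of the patch.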
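
The virtnet_probe() hunk also starts honouring individual host feature bits (VIRTIO_NET_F_HOST_TSO4/TSO6/ECN/UFO) when deciding which offloads to advertise. The small sketch below shows that mapping in isolation, assuming made-up bit values and flag names; negotiate_features() is a hypothetical helper written for this example and does not correspond to the virtio or netdev definitions.

/* Each "host can handle X" bit enables the matching offload flag,
 * but only when GSO support is wanted at all (the gso parameter). */
#include <stdio.h>

#define HOST_TSO4 (1u << 0)
#define HOST_TSO6 (1u << 1)
#define HOST_ECN  (1u << 2)
#define HOST_UFO  (1u << 3)

#define F_TSO     (1u << 0)
#define F_TSO6    (1u << 1)
#define F_TSO_ECN (1u << 2)
#define F_UFO     (1u << 3)

static unsigned int negotiate_features(unsigned int host_bits, int gso)
{
	unsigned int features = 0;

	if (gso && (host_bits & HOST_TSO4))
		features |= F_TSO;
	if (gso && (host_bits & HOST_TSO6))
		features |= F_TSO6;
	if (gso && (host_bits & HOST_ECN))
		features |= F_TSO_ECN;
	if (gso && (host_bits & HOST_UFO))
		features |= F_UFO;
	return features;
}

int main(void)
{
	/* Example: the host offers TSO4 and TSO6 but not ECN or UFO. */
	printf("negotiated 0x%x\n", negotiate_features(HOST_TSO4 | HOST_TSO6, 1));
	return 0;
}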