        if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
                if (queue->input_pkt_queue.qlen) {
 enqueue:
-                       dev_hold(skb->dev);
                        __skb_queue_tail(&queue->input_pkt_queue, skb);
                        local_irq_restore(flags);
                        return NET_RX_SUCCESS;
                }

@@ ... @@ int netif_receive_skb(struct sk_buff *skb)
        return ret;
 }
 
+/* Network device is going away, flush any packets still pending  */
+static void flush_backlog(void *arg)
+{
+       struct net_device *dev = arg;
+       struct softnet_data *queue = &__get_cpu_var(softnet_data);
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+               if (skb->dev == dev) {
+                       __skb_unlink(skb, &queue->input_pkt_queue);
+                       kfree_skb(skb);
+               }
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
        int work = 0;
        struct softnet_data *queue = &__get_cpu_var(softnet_data);
        unsigned long start_time = jiffies;

        napi->weight = weight_p;
        do {
                struct sk_buff *skb;
-               struct net_device *dev;
 
                local_irq_disable();
                skb = __skb_dequeue(&queue->input_pkt_queue);
                if (!skb) {
                        __napi_complete(napi);
                        local_irq_enable();
                        break;
                }
-
                local_irq_enable();
 
-               dev = skb->dev;
-
                netif_receive_skb(skb);
-
-               dev_put(dev);
        } while (++work < quota && jiffies == start_time);
 
        return work;
 }
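
With the dev_hold()/dev_put() pair gone, process_backlog() above is a pure
budgeted drain: dequeue, deliver, and stop after `quota` packets or when the
jiffy that started the poll is over. A self-contained user-space sketch of
that loop shape, with clock() standing in for jiffies and every name here
(struct pkt, dequeue, deliver, drain) a hypothetical stand-in, not kernel API:

#include <stdio.h>
#include <time.h>

struct pkt { int id; struct pkt *next; };

/* Pop one packet off a singly linked queue; NULL when empty. */
static struct pkt *dequeue(struct pkt **head)
{
        struct pkt *p = *head;

        if (p)
                *head = p->next;
        return p;
}

static void deliver(struct pkt *p)
{
        printf("delivered packet %d\n", p->id);
}

/* Drain at most `quota` packets, yielding early once the time slice
 * expires -- the same shape as the do/while loop above, where the
 * slice test is `jiffies == start_time`. */
static int drain(struct pkt **head, int quota, clock_t slice_end)
{
        int work = 0;

        do {
                struct pkt *p = dequeue(head);

                if (!p)
                        break;  /* empty: the kernel also completes the poll */
                deliver(p);
        } while (++work < quota && clock() < slice_end);

        return work;
}

int main(void)
{
        struct pkt pkts[5];
        struct pkt *head = &pkts[0];

        for (int i = 0; i < 5; i++) {
                pkts[i].id = i;
                pkts[i].next = (i < 4) ? &pkts[i + 1] : NULL;
        }
        /* quota of 3, generous one-second slice: expect work = 3 */
        printf("work = %d\n", drain(&head, 3, clock() + CLOCKS_PER_SEC));
        return 0;
}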
 
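The new flush_backlog() above has to use the _safe flavour of the queue walk
(skb_queue_walk_safe) because it unlinks and frees the entry it is standing
on: the iterator caches the next node before the body runs, so freeing the
current one cannot break the traversal. A minimal user-space sketch of that
pattern -- struct pkt, dev_id and flush_dev() are hypothetical stand-ins for
sk_buff, skb->dev and the helpers above:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
        int dev_id;             /* stand-in for skb->dev */
        struct pkt *next;
};

/* Unlink and free every packet that belongs to dev_id; returns how
 * many were flushed. `tmp` caches the successor before a possible
 * free(), just like the tmp argument of skb_queue_walk_safe(). */
static int flush_dev(struct pkt **head, int dev_id)
{
        struct pkt **pp = head, *cur, *tmp;
        int n = 0;

        for (cur = *head; cur; cur = tmp) {
                tmp = cur->next;
                if (cur->dev_id == dev_id) {
                        *pp = tmp;      /* unlink, cf. __skb_unlink() */
                        free(cur);      /* cf. kfree_skb() */
                        n++;
                } else {
                        pp = &cur->next;
                }
        }
        return n;
}

int main(void)
{
        struct pkt *head = NULL, **tail = &head;

        for (int i = 0; i < 6; i++) {
                struct pkt *p = malloc(sizeof(*p));

                p->dev_id = i % 2;      /* packets for two devices */
                p->next = NULL;
                *tail = p;
                tail = &p->next;
        }
        printf("flushed %d packets for dev 1\n", flush_dev(&head, 1));
        while (head) {
                struct pkt *t = head->next;

                free(head);
                head = t;
        }
        return 0;
}
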
@@ ... @@ void netdev_run_todo(void)

                dev->reg_state = NETREG_UNREGISTERED;
 
+               on_each_cpu(flush_backlog, dev, 1);
+
                netdev_wait_allrefs(dev);
 
                /* paranoia */
                BUG_ON(atomic_read(&dev->refcnt));
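
The ordering added above matters: on_each_cpu(flush_backlog, dev, 1) runs the
flush against every CPU's private backlog queue, and the final argument
(wait = 1) makes the call return only after the callback has finished
everywhere, so netdev_wait_allrefs() never waits on packets still sitting in
a per-CPU queue. A rough pthreads analogue of that wait-for-all-CPUs
semantic, with threads standing in for CPUs and every name hypothetical:

#include <pthread.h>
#include <stdio.h>

#define NCPU 4

struct call {
        void (*func)(void *);
        void *arg;
};

static void flush_backlog_sketch(void *arg)
{
        int *dev_id = arg;

        /* would drop this "CPU"'s queued packets for *dev_id */
        printf("flushed backlog for dev %d\n", *dev_id);
}

static void *run(void *p)
{
        struct call *c = p;

        c->func(c->arg);
        return NULL;
}

/* Stand-in for on_each_cpu(func, arg, 1): returns only after the
 * callback has completed in every per-CPU context. */
static void on_each_cpu_sketch(void (*func)(void *), void *arg)
{
        pthread_t tid[NCPU];
        struct call c = { func, arg };
        int i;

        for (i = 0; i < NCPU; i++)
                pthread_create(&tid[i], NULL, run, &c);
        for (i = 0; i < NCPU; i++)
                pthread_join(tid[i], NULL);     /* the wait == 1 part */
}

int main(void)
{
        int dev_id = 7;

        on_each_cpu_sketch(flush_backlog_sketch, &dev_id);
        /* only now is it safe to start waiting for the last references */
        return 0;
}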