static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
        if (unlikely(skb->next))
-               q->gso_skb = skb;
+               __skb_queue_head(&q->requeue, skb);
        else
                q->ops->requeue(skb, q);
 
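
For readability, dev_requeue_skb() as it reads with this hunk applied. The tail of the function is not part of the hunk; the __netif_schedule()/return lines below are assumed unchanged from the surrounding sch_generic.c code of this era:

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	if (unlikely(skb->next))
		/* head of a segment list: park it on the new per-qdisc
		 * requeue list instead of the single gso_skb slot */
		__skb_queue_head(&q->requeue, skb);
	else
		/* ordinary packets still go through the qdisc's ->requeue() op */
		q->ops->requeue(skb, q);

	__netif_schedule(q);	/* assumed unchanged, not shown in the hunk */

	return 0;
}
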
@@ ... @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
        struct sk_buff *skb;
 
-       if ((skb = q->gso_skb))
-               q->gso_skb = NULL;
-       else
+       skb = __skb_dequeue(&q->requeue);
+       if (!skb)
                skb = q->dequeue(q);
 
        return skb;
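
Composed from the hunk above, dequeue_skb() now drains the new requeue list first and only falls back to the qdisc's own ->dequeue() when that list is empty (__skb_dequeue() returns NULL on an empty list):

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb;

	/* previously requeued packets take priority */
	skb = __skb_dequeue(&q->requeue);
	if (!skb)
		skb = q->dequeue(q);

	return skb;
}
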
@@ ... @@ struct Qdisc noop_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
        .ops            =       &noop_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noop_qdisc.list),
+       .requeue.lock   =       __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .q.lock         =       __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue      =       &noop_netdev_queue,
 };
@@ ... @@ static struct Qdisc noqueue_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
        .ops            =       &noqueue_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noqueue_qdisc.list),
+       .requeue.lock   =       __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
        .q.lock         =       __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
        .dev_queue      =       &noqueue_netdev_queue,
 };
@@ ... @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->padded = (char *) sch - (char *) p;
 
        INIT_LIST_HEAD(&sch->list);
+       skb_queue_head_init(&sch->requeue);
        skb_queue_head_init(&sch->q);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
@@ ... @@ void qdisc_destroy(struct Qdisc *qdisc)
 	module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));
 
-       kfree_skb(qdisc->gso_skb);
+       __skb_queue_purge(&qdisc->requeue);
 
        kfree((char *) qdisc - qdisc->padded);
 }
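
For reference, a minimal sketch of what the purge amounts to (the helper name below is hypothetical and this is not the kernel's exact skbuff.h implementation): every skb still parked on qdisc->requeue is popped and freed, whereas the old kfree_skb(qdisc->gso_skb) could free at most one packet.

static inline void __skb_queue_purge_sketch(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	/* pop and free each remaining skb so nothing left on the
	 * requeue list leaks when the qdisc is destroyed */
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}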