 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 /*
- * dev_ifb->tx_queue.lock is usually taken after dev->ingress_lock,
- * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->tx_queue.lock with dev_ifb->ingress_lock.
+ * dev_ifb->tx_queue.lock is usually taken after dev->rx_queue.lock,
+ * the reverse of e.g. qdisc_lock_tree(). This should be safe as long as
+ * ifb doesn't take dev->tx_queue.lock together with dev_ifb->rx_queue.lock.
  * But lockdep should know that ifb has different locks from dev.
  */
 static struct lock_class_key ifb_tx_queue_lock_key;
-static struct lock_class_key ifb_ingress_lock_key;
+static struct lock_class_key ifb_rx_queue_lock_key;
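
For context, a minimal sketch (not part of the patch; the function is purely illustrative) of the cross-device nesting the comment above describes: both locks are struct netdev_queue spinlocks, so without the dedicated lock_class_key assigned below in ifb_init_one() they would share one lockdep class and this nesting would be reported as possible recursive locking.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Illustrative only: the lock order matches the ifb.c comment above. */
static void illustrate_ifb_nesting(struct net_device *dev,
				   struct net_device *dev_ifb)
{
	spin_lock(&dev->rx_queue.lock);		/* ingress path of the real device */
	spin_lock(&dev_ifb->tx_queue.lock);	/* redirecting the packet into ifb */

	/* ... enqueue to ifb's qdisc would happen here ... */

	spin_unlock(&dev_ifb->tx_queue.lock);
	spin_unlock(&dev->rx_queue.lock);
}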
 
 
 static int __init ifb_init_one(int index)
                goto err;
 
        lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
-       lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);
+       lockdep_set_class(&dev_ifb->rx_queue.lock, &ifb_rx_queue_lock_key);
 
        return 0;
 
 
        struct netdev_queue     rx_queue;
        struct netdev_queue     tx_queue ____cacheline_aligned_in_smp;
 
-       /* ingress path synchronizer */
-       spinlock_t              ingress_lock;
        struct Qdisc            *qdisc_ingress;
 
 /*
 
  */
 static int ing_filter(struct sk_buff *skb)
 {
-       struct Qdisc *q;
        struct net_device *dev = skb->dev;
-       int result = TC_ACT_OK;
        u32 ttl = G_TC_RTTL(skb->tc_verd);
+       struct netdev_queue *rxq;
+       int result = TC_ACT_OK;
+       struct Qdisc *q;
 
        if (MAX_RED_LOOP < ttl++) {
                printk(KERN_WARNING
        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-       spin_lock(&dev->ingress_lock);
+       rxq = &dev->rx_queue;
+
+       spin_lock(&rxq->lock);
        if ((q = dev->qdisc_ingress) != NULL)
                result = q->enqueue(skb, q);
-       spin_unlock(&dev->ingress_lock);
+       spin_unlock(&rxq->lock);
 
        return result;
 }
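
A simplified sketch of ing_filter()'s caller (the real handle_ing() in net/core/dev.c also juggles packet_type state; only the verdict handling is shown, and the function name here is illustrative):

#include <linux/netdevice.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>

static struct sk_buff *example_handle_ing(struct sk_buff *skb)
{
	if (!skb->dev->qdisc_ingress)
		return skb;		/* no ingress qdisc configured */

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);		/* verdict consumed the packet: stop here */
		return NULL;
	}

	return skb;			/* continue normal receive processing */
}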
        spin_lock_init(&dev->_xmit_lock);
        netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
        dev->xmit_lock_owner = -1;
-       spin_lock_init(&dev->ingress_lock);
 
        dev->iflink = -1;
 
 
 
        sch->parent = parent;
 
+       sch->stats_lock = &dev_queue->lock;
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
-               sch->stats_lock = &dev->ingress_lock;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
        } else {
-               sch->stats_lock = &dev_queue->lock;
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
                        err = -ENOMEM;
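
After this change both branches leave stats_lock pointing at the owning netdev_queue's lock, so a stats reader serializes against the same lock the enqueue/dequeue path holds while updating the counters; a minimal reader sketch under that assumption (the function is illustrative, not kernel API):

#include <net/sch_generic.h>
#include <net/gen_stats.h>

static void example_read_basic_stats(struct Qdisc *q,
				     struct gnet_stats_basic *out)
{
	/* stats_lock is the queue lock the datapath holds while it
	 * updates q->bstats, so a locked copy is consistent.
	 */
	spin_lock_bh(q->stats_lock);
	*out = q->bstats;
	spin_unlock_bh(q->stats_lock);
}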
 
  * - enqueue, dequeue are serialized via top level device
  *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
- *   spinlock dev->ingress_lock.
+ *   spinlock dev->rx_queue.lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
        __acquires(dev->tx_queue.lock)
-       __acquires(dev->ingress_lock)
+       __acquires(dev->rx_queue.lock)
 {
        spin_lock_bh(&dev->tx_queue.lock);
-       spin_lock(&dev->ingress_lock);
+       spin_lock(&dev->rx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
-       __releases(dev->ingress_lock)
+       __releases(dev->rx_queue.lock)
        __releases(dev->tx_queue.lock)
 {
-       spin_unlock(&dev->ingress_lock);
+       spin_unlock(&dev->rx_queue.lock);
        spin_unlock_bh(&dev->tx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);
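
Finally, a usage sketch of the lock pair (the helper is made up for illustration; real grafting goes through qdisc_graft()/dev_graft_qdisc() and also handles refcounts and device deactivation): anything rewiring qdisc pointers takes both queue locks so neither the transmit path (tx_queue.lock) nor ing_filter() (rx_queue.lock) sees a half-swapped tree.

#include <linux/netdevice.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

static struct Qdisc *example_swap_ingress_qdisc(struct net_device *dev,
						struct Qdisc *new_q)
{
	struct Qdisc *old_q;

	qdisc_lock_tree(dev);		/* tx_queue.lock (BH off), then rx_queue.lock */
	old_q = dev->qdisc_ingress;
	dev->qdisc_ingress = new_q;
	qdisc_unlock_tree(dev);

	return old_q;			/* caller disposes of it outside the locks */
}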