enum netdev_state_t
 {
-       __LINK_STATE_XOFF=0,
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_SCHED,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
-       __LINK_STATE_QDISC_RUNNING,
 };
 
 
 # define napi_synchronize(n)   barrier()
 #endif
 
+enum netdev_queue_state_t
+{
+       __QUEUE_STATE_XOFF,
+       __QUEUE_STATE_QDISC_RUNNING,
+};
+
 struct netdev_queue {
        spinlock_t              lock;
        struct net_device       *dev;
        struct Qdisc            *qdisc;
+       unsigned long           state;
        struct sk_buff          *gso_skb;
        spinlock_t              _xmit_lock;
        int                     xmit_lock_owner;
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-       struct net_device *dev = txq->dev;
-
-       if (!test_bit(__LINK_STATE_XOFF, &dev->state))
+       if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
                __netif_schedule(txq);
 }
 
  *
  *     Allow upper layers to call the device hard_start_xmit routine.
  */
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+       clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_start_queue(struct net_device *dev)
 {
-       clear_bit(__LINK_STATE_XOFF, &dev->state);
+       netif_tx_start_queue(&dev->tx_queue);
 }
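
(Illustration only, not part of the patch: a driver typically starts its queue from its open routine once TX resources are ready. my_open(), struct my_priv and my_init_tx_ring() below are hypothetical names.)

	static int my_open(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);	/* hypothetical private data */

		my_init_tx_ring(priv);				/* hypothetical ring setup */
		netif_start_queue(dev);				/* clears __QUEUE_STATE_XOFF on dev->tx_queue */
		return 0;
	}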
 
 /**
  *     Allow upper layers to call the device hard_start_xmit routine.
  *     Used for flow control when transmit resources are available.
  */
-static inline void netif_wake_queue(struct net_device *dev)
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap()) {
-               clear_bit(__LINK_STATE_XOFF, &dev->state);
+               clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
                return;
        }
 #endif
-       if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-               __netif_schedule(&dev->tx_queue);
+       if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+               __netif_schedule(dev_queue);
+}
+
+static inline void netif_wake_queue(struct net_device *dev)
+{
+       netif_tx_wake_queue(&dev->tx_queue);
 }
 
 /**
  *     Stop upper layers calling the device hard_start_xmit routine.
  *     Used for flow control when transmit resources are unavailable.
  */
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+       set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_stop_queue(struct net_device *dev)
 {
-       set_bit(__LINK_STATE_XOFF, &dev->state);
+       netif_tx_stop_queue(&dev->tx_queue);
 }
 
 /**
  *
  *     Test if transmit queue on device is currently unable to send.
  */
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+       return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-       return test_bit(__LINK_STATE_XOFF, &dev->state);
+       return netif_tx_queue_stopped(&dev->tx_queue);
 }
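
(Likewise illustrative, not part of the patch: the stop/wake pair is the usual TX flow-control pattern these helpers exist for. my_ring_full(), my_descs_free() and TX_WAKE_THRESH are hypothetical.)

	/* hard_start_xmit path: back off when the TX ring fills up */
	if (my_ring_full(priv)) {
		netif_stop_queue(dev);		/* sets __QUEUE_STATE_XOFF */
		return NETDEV_TX_BUSY;
	}

	/* TX-completion path: restart once enough descriptors are reclaimed */
	if (netif_queue_stopped(dev) && my_descs_free(priv) > TX_WAKE_THRESH)
		netif_wake_queue(dev);		/* clears XOFF and reschedules the queue */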
 
 /**
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-       clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+       clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
        if (netpoll_trap())
                return;
 #endif
-       set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+       set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                         u16 queue_index)
 {
-       return test_bit(__LINK_STATE_XOFF,
+       return test_bit(__QUEUE_STATE_XOFF,
                        &dev->egress_subqueue[queue_index].state);
 }
 
        if (netpoll_trap())
                return;
 #endif
-       if (test_and_clear_bit(__LINK_STATE_XOFF,
+       if (test_and_clear_bit(__QUEUE_STATE_XOFF,
                               &dev->egress_subqueue[queue_index].state))
                __netif_schedule(&dev->tx_queue);
 }
 
 /*
  * NOTE: Called under queue->lock with locally disabled BH.
  *
- * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. queue->lock serializes queue accesses for
- * this device AND txq->qdisc pointer itself.
+ * __QUEUE_STATE_QDISC_RUNNING guarantees only one CPU can process
+ * this queue at a time. queue->lock serializes queue accesses for
+ * this queue AND txq->qdisc pointer itself.
  *
  *  netif_tx_lock serializes accesses to device driver.
  *
                }
        }
 
-       clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+       clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
 }
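
(Not shown in this excerpt: the set side of the bit sits in the qdisc_run() wrapper, which with this change gates on the per-queue state word, roughly along these lines.)

	static inline void qdisc_run(struct netdev_queue *txq)
	{
		/* only one CPU may own the dequeue loop for this queue */
		if (!netif_tx_queue_stopped(txq) &&
		    !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
			__qdisc_run(txq);
	}

The clear_bit() in the hunk above releases that ownership once the run completes, so dev_deactivate() below can safely poll the same bit.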
 
 static void dev_watchdog(unsigned long arg)
 
 void dev_deactivate(struct net_device *dev)
 {
+       struct netdev_queue *dev_queue = &dev->tx_queue;
        int running;
 
-       dev_deactivate_queue(&dev->tx_queue, &noop_qdisc);
+       dev_deactivate_queue(dev_queue, &noop_qdisc);
 
        dev_watchdog_down(dev);
 
 
        /* Wait for outstanding qdisc_run calls. */
        do {
-               while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+               while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
                        yield();
 
                /*
                 * Double-check inside queue lock to ensure that all effects
                 * of the queue run are visible when we return.
                 */
-               spin_lock_bh(&dev->tx_queue.lock);
-               running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-               spin_unlock_bh(&dev->tx_queue.lock);
+               spin_lock_bh(&dev_queue->lock);
+               running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
+                                  &dev_queue->state);
+               spin_unlock_bh(&dev_queue->lock);
 
                /*
                 * The running flag should never be set at this point because