        atomic_t                refcnt;
        struct sk_buff_head     q;
        struct netdev_queue     *dev_queue;
-       struct net_device       *dev;
        struct list_head        list;
 
        struct gnet_stats_basic bstats;
        struct Qdisc_ops        *ops;
 };
 
+static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
+{
+       return qdisc->dev_queue->dev;
+}
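
The accessor above is the crux of the patch: the cached struct net_device
pointer is removed from struct Qdisc, and every former qd->dev reference
below becomes qdisc_dev(qd), reaching the device through the attached TX
queue. A minimal before/after sketch (illustrative, not part of the patch):

        dev = qdisc->dev;       /* before: cached pointer, now removed */
        dev = qdisc_dev(qdisc); /* after: qdisc->dev_queue->dev */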
 
 extern void qdisc_lock_tree(struct net_device *dev);
 extern void qdisc_unlock_tree(struct net_device *dev);
 
-#define sch_tree_lock(q)       qdisc_lock_tree((q)->dev)
-#define sch_tree_unlock(q)     qdisc_unlock_tree((q)->dev)
-#define tcf_tree_lock(tp)      qdisc_lock_tree((tp)->q->dev)
-#define tcf_tree_unlock(tp)    qdisc_unlock_tree((tp)->q->dev)
+#define sch_tree_lock(q)       qdisc_lock_tree(qdisc_dev(q))
+#define sch_tree_unlock(q)     qdisc_unlock_tree(qdisc_dev(q))
+#define tcf_tree_lock(tp)      qdisc_lock_tree(qdisc_dev((tp)->q))
+#define tcf_tree_unlock(tp)    qdisc_unlock_tree(qdisc_dev((tp)->q))
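
The tree-locking macros keep their call sites unchanged; only the way the
device is located changes. A hedged usage sketch (matching calls appear in
the HTB hunks further down):

        sch_tree_lock(sch);     /* now qdisc_lock_tree(qdisc_dev(sch)) */
        /* ... modify the class/qdisc hierarchy ... */
        sch_tree_unlock(sch);   /* now qdisc_unlock_tree(qdisc_dev(sch)) */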
 
 extern struct Qdisc noop_qdisc;
 extern struct Qdisc_ops noop_qdisc_ops;
 extern void qdisc_reset(struct Qdisc *qdisc);
 extern void qdisc_destroy(struct Qdisc *qdisc);
 extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct net_device *dev,
-                                struct netdev_queue *dev_queue,
+extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops);
 extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
                                       struct netdev_queue *dev_queue,
 
  * negative return value indicates to drop the frame */
 static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
 {
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
        if (!ieee80211_is_data(hdr->frame_control)) {
 
 static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
 {
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        struct ieee80211_sched_data *q = qdisc_priv(qd);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
 {
        struct ieee80211_sched_data *q = qdisc_priv(qd);
-       struct net_device *dev = qd->dev;
+       struct net_device *dev = qdisc_dev(qd);
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        struct sk_buff *skb;
 static void wme_qdiscop_reset(struct Qdisc* qd)
 {
        struct ieee80211_sched_data *q = qdisc_priv(qd);
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        int queue;
 
 static void wme_qdiscop_destroy(struct Qdisc* qd)
 {
        struct ieee80211_sched_data *q = qdisc_priv(qd);
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        int queue;
 
 static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
 {
        struct ieee80211_sched_data *q = qdisc_priv(qd);
-       struct net_device *dev = qd->dev;
+       struct net_device *dev = qdisc_dev(qd);
        struct ieee80211_local *local;
        struct ieee80211_hw *hw;
        int err = 0, i;
        /* create child queues */
        for (i = 0; i < QD_NUM(hw); i++) {
                skb_queue_head_init(&q->requeued[i]);
-               q->queues[i] = qdisc_create_dflt(qd->dev, qd->dev_queue,
+               q->queues[i] = qdisc_create_dflt(qdisc_dev(qd), qd->dev_queue,
                                                 &pfifo_qdisc_ops,
                                                 qd->handle);
                if (!q->queues[i]) {
                             struct Qdisc *new, struct Qdisc **old)
 {
        struct ieee80211_sched_data *q = qdisc_priv(qd);
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        unsigned long queue = arg - 1;
 
 wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
 {
        struct ieee80211_sched_data *q = qdisc_priv(qd);
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        unsigned long queue = arg - 1;
 
 
 static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
 {
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        unsigned long queue = TC_H_MIN(classid);
 
                              struct nlattr **tca, unsigned long *arg)
 {
        unsigned long cl = *arg;
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
 
        if (cl - 1 > QD_NUM(hw))
  * when we add WMM-SA support - TSPECs may be deleted here */
 static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
 {
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
 
        if (cl - 1 > QD_NUM(hw))
                                  struct sk_buff *skb, struct tcmsg *tcm)
 {
        struct ieee80211_sched_data *q = qdisc_priv(qd);
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
 
        if (cl - 1 > QD_NUM(hw))
 
 static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
 {
-       struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+       struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
        struct ieee80211_hw *hw = &local->hw;
        int queue;
 
 
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
-       tcm->tcm_ifindex = tp->q->dev->ifindex;
+       tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
        tcm->tcm_parent = tp->classid;
        tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
        NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
 
                        *fp = f->next;
                        tcf_tree_unlock(tp);
 
-                       route4_reset_fastmap(tp->q->dev, head, f->id);
+                       route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
                        route4_delete_filter(tp, f);
 
                        /* Strip tree */
        }
        tcf_tree_unlock(tp);
 
-       route4_reset_fastmap(tp->q->dev, head, f->id);
+       route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
        *arg = (unsigned long)f;
        return 0;
 
 
 {
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);
-       struct net_device *dev = wd->qdisc->dev;
+       struct net_device *dev = qdisc_dev(wd->qdisc);
 
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
        smp_wmb();
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                        return;
 
-               sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+               sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL) {
                        WARN_ON(parentid != TC_H_ROOT);
                        return;
        if (ops == NULL)
                goto err_out;
 
-       sch = qdisc_alloc(dev, dev_queue, ops);
+       sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch)) {
                err = PTR_ERR(sch);
                goto err_out2;
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
-       tcm->tcm_ifindex = q->dev->ifindex;
+       tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = atomic_read(&q->refcnt);
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
-       tcm->tcm_ifindex = q->dev->ifindex;
+       tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = 0;
 
                goto err_out;
        }
        flow->filter_list = NULL;
-       flow->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                    &pfifo_qdisc_ops, classid);
        if (!flow->q)
                flow->q = &noop_qdisc;
 
        pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
        p->flows = &p->link;
-       p->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                      &pfifo_qdisc_ops, sch->handle);
        if (!p->link.q)
                p->link.q = &noop_qdisc;
 
        }
 
        sch->flags &= ~TCQ_F_THROTTLED;
-       netif_schedule(sch->dev);
+       netif_schedule(qdisc_dev(sch));
        return HRTIMER_NORESTART;
 }
 
                                cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
                                        q->quanta[prio];
                        }
-                       if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
+                       if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
                                printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
-                               cl->quantum = cl->qdisc->dev->mtu/2 + 1;
+                               cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
                        }
                }
        }
        q->link.sibling = &q->link;
        q->link.common.classid = sch->handle;
        q->link.qdisc = sch;
-       if (!(q->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                            &pfifo_qdisc_ops,
                                            sch->handle)))
                q->link.q = &noop_qdisc;
        q->link.cpriority = TC_CBQ_MAXPRIO-1;
        q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
        q->link.overlimit = cbq_ovl_classic;
-       q->link.allot = psched_mtu(sch->dev);
+       q->link.allot = psched_mtu(qdisc_dev(sch));
        q->link.quantum = q->link.allot;
        q->link.weight = q->link.R_tab->rate.rate;
 
 
        if (cl) {
                if (new == NULL) {
-                       new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                       new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                                &pfifo_qdisc_ops,
                                                cl->common.classid);
                        if (new == NULL)
 #ifdef CONFIG_NET_CLS_ACT
                struct cbq_sched_data *q = qdisc_priv(sch);
 
-               spin_lock_bh(&sch->dev->queue_lock);
+               spin_lock_bh(&qdisc_dev(sch)->queue_lock);
                if (q->rx_class == cl)
                        q->rx_class = NULL;
-               spin_unlock_bh(&sch->dev->queue_lock);
+               spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 #endif
 
                cbq_destroy_class(sch, cl);
 
                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                             &sch->dev->queue_lock,
+                                             &qdisc_dev(sch)->queue_lock,
                                              tca[TCA_RATE]);
                return 0;
        }
        cl->R_tab = rtab;
        rtab = NULL;
        cl->refcnt = 1;
-       if (!(cl->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                        &pfifo_qdisc_ops, classid)))
                cl->q = &noop_qdisc;
        cl->common.classid = classid;
 
        if (tca[TCA_RATE])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                 &sch->dev->queue_lock, tca[TCA_RATE]);
+                                 &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
 
        *arg = (unsigned long)cl;
        return 0;
 
                sch, p, new, old);
 
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+               new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                        &pfifo_qdisc_ops,
                                        sch->handle);
                if (new == NULL)
        p->default_index = default_index;
        p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-       p->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                 &pfifo_qdisc_ops, sch->handle);
        if (p->q == NULL)
                p->q = &noop_qdisc;
 
        struct fifo_sched_data *q = qdisc_priv(sch);
 
        if (opt == NULL) {
-               u32 limit = sch->dev->tx_queue_len ? : 1;
+               u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
                if (sch->ops == &bfifo_qdisc_ops)
-                       limit *= sch->dev->mtu;
+                       limit *= qdisc_dev(sch)->mtu;
 
                q->limit = limit;
        } else {
        struct Qdisc *q;
        int err = -ENOMEM;
 
-       q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                              ops, TC_H_MAKE(sch->handle, 1));
        if (q) {
                err = fifo_set_limit(q, limit);
 
 {
        struct sk_buff_head *list = prio2list(skb, qdisc);
 
-       if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
+       if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }
        .owner          =       THIS_MODULE,
 };
 
-struct Qdisc *qdisc_alloc(struct net_device *dev,
-                         struct netdev_queue *dev_queue,
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
 {
        void *p;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
-       sch->dev = dev;
-       dev_hold(dev);
+       dev_hold(qdisc_dev(sch));
        atomic_set(&sch->refcnt, 1);
 
        return sch;
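
Two details of the new qdisc_alloc() are worth noting: it no longer takes a
struct net_device argument, and sch->dev_queue is assigned before
dev_hold(qdisc_dev(sch)), since the helper dereferences the queue to find
the device. A hedged caller sketch (error handling condensed; the dev_queue
value comes from the caller, as in the qdisc_create_dflt hunk below):

        struct Qdisc *sch;

        sch = qdisc_alloc(dev_queue, &pfifo_qdisc_ops);
        if (IS_ERR(sch))
                return PTR_ERR(sch);    /* no device reference held on failure */
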
 {
        struct Qdisc *sch;
 
-       sch = qdisc_alloc(dev, dev_queue, ops);
+       sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch))
                goto errout;
        sch->stats_lock = &dev->queue_lock;
                ops->destroy(qdisc);
 
        module_put(ops->owner);
-       dev_put(qdisc->dev);
+       dev_put(qdisc_dev(qdisc));
        call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }
 EXPORT_SYMBOL(qdisc_destroy);
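
Teardown mirrors allocation: the reference taken via dev_hold(qdisc_dev(sch))
in qdisc_alloc() is dropped via dev_put(qdisc_dev(qdisc)) here, which means
dev_queue must remain valid for the qdisc's entire lifetime. Condensed:

        sch = qdisc_alloc(dev_queue, ops);      /* dev_hold(qdisc_dev(sch)) */
        /* ... qdisc in service ... */
        qdisc_destroy(sch);                     /* dev_put(qdisc_dev(sch)) */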
 
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
                         */
-                       if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+                       if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
                                return qdisc_enqueue_tail(skb, sch);
                        else
                                goto drop;
 
 
                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                             &sch->dev->queue_lock,
+                                             &qdisc_dev(sch)->queue_lock,
                                              tca[TCA_RATE]);
                return 0;
        }
        cl->refcnt    = 1;
        cl->sched     = q;
        cl->cl_parent = parent;
-       cl->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                      &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
 
        if (tca[TCA_RATE])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                 &sch->dev->queue_lock, tca[TCA_RATE]);
+                                 &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
        *arg = (unsigned long)cl;
        return 0;
 }
        if (cl->level > 0)
                return -EINVAL;
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+               new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                        &pfifo_qdisc_ops,
                                        cl->cl_common.classid);
                if (new == NULL)
        q->root.cl_common.classid = sch->handle;
        q->root.refcnt  = 1;
        q->root.sched   = q;
-       q->root.qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                          &pfifo_qdisc_ops,
                                          sch->handle);
        if (q->root.qdisc == NULL)
 
        qdisc_watchdog_init(&q->watchdog, sch);
        skb_queue_head_init(&q->direct_queue);
 
-       q->direct_qlen = sch->dev->tx_queue_len;
+       q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;
 
        struct nlattr *nest;
        struct tc_htb_glob gopt;
 
-       spin_lock_bh(&sch->dev->queue_lock);
+       spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 
        gopt.direct_pkts = q->direct_pkts;
        gopt.version = HTB_VER;
        NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
        nla_nest_end(skb, nest);
 
-       spin_unlock_bh(&sch->dev->queue_lock);
+       spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
        return skb->len;
 
 nla_put_failure:
-       spin_unlock_bh(&sch->dev->queue_lock);
+       spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
        nla_nest_cancel(skb, nest);
        return -1;
 }
        struct nlattr *nest;
        struct tc_htb_opt opt;
 
-       spin_lock_bh(&sch->dev->queue_lock);
+       spin_lock_bh(&qdisc_dev(sch)->queue_lock);
        tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        if (!cl->level && cl->un.leaf.q)
        NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
 
        nla_nest_end(skb, nest);
-       spin_unlock_bh(&sch->dev->queue_lock);
+       spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
        return skb->len;
 
 nla_put_failure:
-       spin_unlock_bh(&sch->dev->queue_lock);
+       spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
        nla_nest_cancel(skb, nest);
        return -1;
 }
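
The two HTB dump routines above show the recurring locking pattern of this
conversion: the per-device queue_lock is reached through the helper instead
of a cached pointer. Condensed to its skeleton:

        spin_lock_bh(&qdisc_dev(sch)->queue_lock);
        /* ... fill netlink attributes from qdisc/class state ... */
        spin_unlock_bh(&qdisc_dev(sch)->queue_lock);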
 
        if (cl && !cl->level) {
                if (new == NULL &&
-                   (new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                   (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                             &pfifo_qdisc_ops,
                                             cl->common.classid))
                    == NULL)
                return -EBUSY;
 
        if (!cl->level && htb_parent_last_child(cl)) {
-               new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+               new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                          &pfifo_qdisc_ops,
                                          cl->parent->common.classid);
                last_child = 1;
                        goto failure;
 
                gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                 &sch->dev->queue_lock,
+                                 &qdisc_dev(sch)->queue_lock,
                                  tca[TCA_RATE] ? : &est.nla);
                cl->refcnt = 1;
                cl->children = 0;
                /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
                   so that can't be used inside of sch_tree_lock
                   -- thanks to Karlis Peisenieks */
-               new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+               new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                          &pfifo_qdisc_ops, classid);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
        } else {
                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                             &sch->dev->queue_lock,
+                                             &qdisc_dev(sch)->queue_lock,
                                              tca[TCA_RATE]);
                sch_tree_lock(sch);
        }
 
         * skb will be queued.
         */
        if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-               struct Qdisc *rootq = sch->dev->qdisc;
+               struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
                u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
                q->duplicate = 0;
 
        for (i = 0; i < n; i++)
                d->table[i] = data[i];
 
-       spin_lock_bh(&sch->dev->queue_lock);
+       spin_lock_bh(&qdisc_dev(sch)->queue_lock);
        d = xchg(&q->delay_dist, d);
-       spin_unlock_bh(&sch->dev->queue_lock);
+       spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 
        kfree(d);
        return 0;
 
                q->limit = ctl->limit;
        } else
-               q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
+               q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
 
        q->oldest = PSCHED_PASTPERFECT;
        return 0;
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
-       q->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+       q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                     &tfifo_qdisc_ops,
                                     TC_H_MAKE(sch->handle, 1));
        if (!q->qdisc) {
 
                 * pulling an skb.  This way we avoid excessive requeues
                 * for slower queues.
                 */
-               if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
+               if (!__netif_subqueue_stopped(qdisc_dev(sch),
+                                             (q->mq ? prio : 0))) {
                        qdisc = q->queues[prio];
                        skb = qdisc->dequeue(qdisc);
                        if (skb) {
                 * for slower queues.  If the queue is stopped, try the
                 * next queue.
                 */
-               if (!__netif_subqueue_stopped(sch->dev,
-                                           (q->mq ? q->curband : 0))) {
+               if (!__netif_subqueue_stopped(qdisc_dev(sch),
+                                             (q->mq ? q->curband : 0))) {
                        qdisc = q->queues[q->curband];
                        skb = qdisc->dequeue(qdisc);
                        if (skb) {
        if (q->mq) {
                if (sch->parent != TC_H_ROOT)
                        return -EINVAL;
-               if (netif_is_multiqueue(sch->dev)) {
+               if (netif_is_multiqueue(qdisc_dev(sch))) {
                        if (q->bands == 0)
-                               q->bands = sch->dev->egress_subqueue_count;
-                       else if (q->bands != sch->dev->egress_subqueue_count)
+                               q->bands = qdisc_dev(sch)->egress_subqueue_count;
+                       else if (q->bands != qdisc_dev(sch)->egress_subqueue_count)
                                return -EINVAL;
                } else
                        return -EOPNOTSUPP;
        for (i=0; i<q->bands; i++) {
                if (q->queues[i] == &noop_qdisc) {
                        struct Qdisc *child;
-                       child = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                       child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                                  &pfifo_qdisc_ops,
                                                  TC_H_MAKE(sch->handle, i + 1));
                        if (child) {
 
                return -EINVAL;
 
        sch_tree_lock(sch);
-       q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+       q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
        q->perturb_period = ctl->perturb_period * HZ;
        if (ctl->limit)
                q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
        q->max_depth = 0;
        q->tail = SFQ_DEPTH;
        if (opt == NULL) {
-               q->quantum = psched_mtu(sch->dev);
+               q->quantum = psched_mtu(qdisc_dev(sch));
                q->perturb_period = 0;
                q->perturbation = net_random();
        } else {
 
 static int
 teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-       struct net_device *dev = sch->dev;
+       struct net_device *dev = qdisc_dev(sch);
        struct teql_sched_data *q = qdisc_priv(sch);
 
        if (q->q.qlen < dev->tx_queue_len) {
 
        skb = __skb_dequeue(&dat->q);
        if (skb == NULL) {
-               struct net_device *m = dat->m->dev->qdisc->dev;
+               struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
                if (m) {
                        dat->m->slaves = sch;
                        netif_wake_queue(m);
 
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
-       struct net_device *dev = sch->dev;
+       struct net_device *dev = qdisc_dev(sch);
        struct teql_master *m = (struct teql_master*)sch->ops;
        struct teql_sched_data *q = qdisc_priv(sch);
 
                goto drop;
 
        do {
-               struct net_device *slave = q->dev;
+               struct net_device *slave = qdisc_dev(q);
 
                if (slave->qdisc_sleeping != q)
                        continue;
 
        q = m->slaves;
        do {
-               struct net_device *slave = q->dev;
+               struct net_device *slave = qdisc_dev(q);
 
                if (slave == NULL)
                        return -EUNATCH;
        q = m->slaves;
        if (q) {
                do {
-                       if (new_mtu > q->dev->mtu)
+                       if (new_mtu > qdisc_dev(q)->mtu)
                                return -EINVAL;
                } while ((q=NEXT_SLAVE(q)) != m->slaves);
        }
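
In teql, each slave qdisc's device now comes from qdisc_dev() as well. A
hedged sketch of the slave-iteration pattern used by the last three hunks:

        struct Qdisc *q = m->slaves;

        do {
                struct net_device *slave = qdisc_dev(q);
                /* ... per-slave work, e.g. the MTU check above ... */
        } while ((q = NEXT_SLAVE(q)) != m->slaves);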