diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30c999c61b01f1ab771fb3bb75984b7595fbaee4..d14f02056ae6dcc31dbfad8c90ba57424e4a6d45 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
        if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
                return cl;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
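The NET_XMIT_BYPASS and NET_XMIT_STOLEN verdicts become flag bits OR'd onto a normal NET_XMIT_* code, so a caller can tell "packet consumed by a tc action" or "classification bypassed" apart from an ordinary drop while still propagating the base return value. A minimal sketch of the helpers this hunk relies on; the authoritative definitions live in include/net/sch_generic.h and the values here are only illustrative:

	/* Sketch of the flag helpers (see include/net/sch_generic.h). */
	enum net_xmit_qdisc_t {
		__NET_XMIT_STOLEN = 0x00010000,	/* consumed by a tc action */
		__NET_XMIT_BYPASS = 0x00020000,	/* classifier bypassed the qdisc */
	};

	#ifdef CONFIG_NET_CLS_ACT
	/* A stolen packet is charged to the action, not to the qdisc's drops. */
	#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
	#else
	#define net_xmit_drop_count(e)	(1)
	#endif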
@@ -524,7 +524,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
  */
 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
-       BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
+       WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
 
        if (!cl->prio_activity) {
                cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
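BUG_TRAP(x) fired when its argument was false, while WARN_ON(x) fires when its argument is true, so every converted assertion has its condition inverted, here and in all of the BUG_TRAP sites further down. Roughly (old helper, since removed from the tree):

	#define BUG_TRAP(x)	WARN_ON(!(x))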
@@ -542,7 +542,7 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
  */
 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 {
-       BUG_TRAP(cl->prio_activity);
+       WARN_ON(!cl->prio_activity);
 
        htb_deactivate_prios(q, cl);
        cl->prio_activity = 0;
@@ -567,15 +567,17 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                }
 #ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
 #endif
-       } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
-               cl->qstats.drops++;
-               return NET_XMIT_DROP;
+       } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       cl->qstats.drops++;
+               }
+               return ret;
        } else {
                cl->bstats.packets +=
                        skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
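Instead of flattening every child failure to NET_XMIT_DROP, the enqueue path now returns the child's verdict and only bumps the drop counters when net_xmit_drop_count() confirms the packet was really lost; htb_requeue() in the next hunk gets the same treatment. The value simply bubbles up through nested qdiscs, roughly like this (illustrative sketch, not part of the patch):

	/* Any classful parent applies the same rule, so a packet stolen by a
	 * tc action is never misreported as a drop anywhere on the path.
	 */
	ret = qdisc_enqueue(skb, child);	/* may carry __NET_XMIT_STOLEN */
	if (ret != NET_XMIT_SUCCESS && net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;				/* parent repeats the check */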
@@ -610,16 +612,18 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                }
 #ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
 #endif
-       } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+       } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
                   NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
-               cl->qstats.drops++;
-               return NET_XMIT_DROP;
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       cl->qstats.drops++;
+               }
+               return ret;
        } else
                htb_activate(q, cl);
 
@@ -757,7 +761,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH], *sp = stk;
 
-       BUG_TRAP(tree->rb_node);
+       WARN_ON(!tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
        sp->pid = pid;
@@ -777,7 +781,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
-                               BUG_TRAP(*sp->pptr);
+                               WARN_ON(!*sp->pptr);
                                if (!*sp->pptr)
                                        return NULL;
                                htb_next_rb_node(sp->pptr);
@@ -792,7 +796,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                        sp->pid = cl->un.inner.last_ptr_id + prio;
                }
        }
-       BUG_TRAP(0);
+       WARN_ON(1);
        return NULL;
 }
 
@@ -810,7 +814,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
 
        do {
 next:
-               BUG_TRAP(cl);
+               WARN_ON(!cl);
                if (!cl)
                        return NULL;
 
@@ -1039,7 +1043,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-       spinlock_t *root_lock = qdisc_root_lock(sch);
+       spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *nest;
        struct tc_htb_glob gopt;
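qdisc_root_lock() resolves through dev_queue->qdisc, which dev_deactivate() replaces with noop_qdisc while the device is down, so the dump path could end up taking the wrong lock. qdisc_root_sleeping_lock() resolves through dev_queue->qdisc_sleeping, which stays attached across deactivation; the estimator calls in htb_change_class() below are switched for the same reason. A rough sketch of the difference, assuming the 2.6.27-era helpers in include/net/sch_generic.h:

	/* Sketch only -- both return the root qdisc's queue lock, but they
	 * start from different pointers in the tx queue.
	 */
	static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
	{
		return qdisc_lock(qdisc_root(qdisc));		/* dev_queue->qdisc */
	}

	static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
	{
		return qdisc_lock(qdisc_root_sleeping(qdisc));	/* dev_queue->qdisc_sleeping */
	}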
@@ -1071,7 +1075,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
 {
        struct htb_class *cl = (struct htb_class *)arg;
-       spinlock_t *root_lock = qdisc_root_lock(sch);
+       spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
        struct nlattr *nest;
        struct tc_htb_opt opt;
 
@@ -1185,7 +1189,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 {
        struct htb_class *parent = cl->parent;
 
-       BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);
+       WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
 
        if (parent->cmode != HTB_CAN_SEND)
                htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
@@ -1205,7 +1209,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
        if (!cl->level) {
-               BUG_TRAP(cl->un.leaf.q);
+               WARN_ON(!cl->un.leaf.q);
                qdisc_destroy(cl->un.leaf.q);
        }
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
@@ -1275,7 +1279,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
        /* delete from hash and active; remainder in destroy_class */
        qdisc_class_hash_remove(&q->clhash, &cl->common);
-       cl->parent->children--;
+       if (cl->parent)
+               cl->parent->children--;
 
        if (cl->prio_activity)
                htb_deactivate(q, cl);
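A class attached directly to the qdisc (its tc parent is the root handle) has no parent class, so cl->parent is NULL for it and the unconditional decrement could dereference a NULL pointer on delete. For context, this is where the NULL comes from, paraphrased from htb_change_class() later in this file (sketch, not part of the patch):

	/* Root-level classes never get a parent class object. */
	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
	/* ... later, when the new class is set up ... */
	cl->parent = parent;	/* stays NULL for root-level classes */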
@@ -1367,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        goto failure;
 
                gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                 qdisc_root_lock(sch),
+                                 qdisc_root_sleeping_lock(sch),
                                  tca[TCA_RATE] ? : &est.nla);
                cl->refcnt = 1;
                cl->children = 0;
@@ -1422,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        } else {
                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                             qdisc_root_lock(sch),
+                                             qdisc_root_sleeping_lock(sch),
                                              tca[TCA_RATE]);
                sch_tree_lock(sch);
        }