#define NET_XMIT_BYPASS                4       /* packet does not leave via dequeue;
                                           (TC use only - dev_queue_xmit
                                           returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK          0xFFFF  /* qdisc flags in net/sch_generic.h */
 
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS         0   /* keep 'em coming, baby */
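The new NET_XMIT_MASK splits the enqueue return value in two: the classic NET_XMIT_* codes stay in the low 16 bits, and qdisc-internal flags (the __NET_XMIT_STOLEN bit added below) live above them. A minimal standalone model of that split; the constants mirror the patch, but the demo main() is illustrative only and not kernel code:

#include <stdio.h>

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1
#define NET_XMIT_MASK           0xFFFF          /* low bits: public return code */
#define __NET_XMIT_STOLEN       0x00010000      /* high bits: internal qdisc flag */

int main(void)
{
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

        /* Masking recovers the public code; the flag rides above it. */
        printf("public code %d, stolen flag %s\n",
               ret & NET_XMIT_MASK,
               (ret & __NET_XMIT_STOLEN) ? "set" : "clear");
        return 0;
}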
 
        return qdisc_skb_cb(skb)->pkt_len;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
+enum net_xmit_qdisc_t {
+       __NET_XMIT_STOLEN = 0x00010000,
+};
+
+#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
+
+#else
+#define net_xmit_drop_count(e) (1)
+#endif
+
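net_xmit_drop_count() encodes the accounting rule of this patch: a packet that a TC action stole or queued elsewhere was consumed on purpose, so a non-zero enqueue return must not bump the drop counters. With the flag set it yields 0, otherwise 1; without CONFIG_NET_CLS_ACT no action can steal a packet and the macro collapses to a constant 1. A self-contained check of both cases, constants copied from the hunk above:

#include <assert.h>

#define NET_XMIT_DROP           1
#define __NET_XMIT_STOLEN       0x00010000
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)

int main(void)
{
        assert(net_xmit_drop_count(NET_XMIT_DROP) == 1);        /* genuine drop */
        assert(net_xmit_drop_count(__NET_XMIT_STOLEN) == 0);    /* stolen, not a drop */
        return 0;
}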
 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 #ifdef CONFIG_NET_SCHED
 static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 {
        qdisc_skb_cb(skb)->pkt_len = skb->len;
-       return qdisc_enqueue(skb, sch);
+       return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
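qdisc_enqueue_root() is the boundary where the internal bits are stripped: it sits at the top of the qdisc stack (per the NET_XMIT_BYPASS comment above, dev_queue_xmit() and its callers deal only in the public codes), so masking here keeps __NET_XMIT_STOLEN from ever leaking upward. A userspace model of that boundary; child_enqueue() is a stand-in for a child qdisc's ->enqueue(), not kernel API:

#include <stdio.h>

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_MASK           0xFFFF
#define __NET_XMIT_STOLEN       0x00010000

/* Stand-in for a child ->enqueue() whose action stole the skb. */
static int child_enqueue(void)
{
        return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
}

/* The root enqueue masks the result, so the flag never escapes. */
static int enqueue_root(void)
{
        return child_enqueue() & NET_XMIT_MASK;
}

int main(void)
{
        printf("root returns %d\n", enqueue_root());    /* prints 0 == SUCCESS */
        return 0;
}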
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        kfree_skb(skb);
-                       return NET_XMIT_SUCCESS;
+                       return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        kfree_skb(skb);
                        goto drop;
        ret = qdisc_enqueue(skb, flow->q);
        if (ret != 0) {
 drop: __maybe_unused
-               sch->qstats.drops++;
-               if (flow)
-                       flow->qstats.drops++;
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       if (flow)
+                               flow->qstats.drops++;
+               }
                return ret;
        }
        sch->bstats.bytes += qdisc_pkt_len(skb);
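The sch_atm hunks above set the pattern the rest of the patch repeats in cbq, dsmark, hfsc, htb, netem, prio, red, sfq and tbf: when an action steals or re-queues the skb, free it and return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; on every failure path, guard qstats.drops++ with net_xmit_drop_count(). The distilled shape as a standalone model; struct stats and both helpers are stand-ins, not kernel API:

#include <stdio.h>

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1
#define __NET_XMIT_STOLEN       0x00010000
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)

struct stats { unsigned int drops; };

/* Stand-in child enqueue: an action steals the skb, or it is dropped. */
static int child_enqueue(int stolen)
{
        return stolen ? (NET_XMIT_SUCCESS | __NET_XMIT_STOLEN) : NET_XMIT_DROP;
}

static int parent_enqueue(struct stats *st, int stolen)
{
        int ret = child_enqueue(stolen);

        if (ret != NET_XMIT_SUCCESS && net_xmit_drop_count(ret))
                st->drops++;            /* only genuine drops are counted */
        return ret;
}

int main(void)
{
        struct stats st = { 0 };

        parent_enqueue(&st, 1);         /* stolen: drops stays 0 */
        parent_enqueue(&st, 0);         /* dropped: drops becomes 1 */
        printf("drops = %u\n", st.drops);
        return 0;
}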
        if (!ret) {
                sch->q.qlen++;
                sch->qstats.requeues++;
-       } else {
+       } else if (net_xmit_drop_count(ret)) {
                sch->qstats.drops++;
                p->link.qstats.drops++;
        }
 
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                case TC_ACT_RECLASSIFY:
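The missing break after the QUEUED/STOLEN cases is deliberate: they overwrite *qerr with NET_XMIT_SUCCESS | __NET_XMIT_STOLEN and fall through to the SHOT case's return NULL, so the caller gets one NULL for all three outcomes and tells a steal from a shot only via *qerr, typically testing net_xmit_drop_count(*qerr) before touching drop counters. The identical hunk recurs in the hfsc, htb, prio and sfq classifiers below. A sketch of that contract with the fallthrough made explicit; the names and the seed value are illustrative, not the kernel's:

#include <stddef.h>

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1
#define __NET_XMIT_STOLEN       0x00010000
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)

enum tc_action { TC_ACT_OK, TC_ACT_QUEUED, TC_ACT_STOLEN, TC_ACT_SHOT };

static int dummy_class;

static void *classify(enum tc_action result, int *qerr)
{
        *qerr = NET_XMIT_DROP;                  /* seed: assume a drop */
        switch (result) {
        case TC_ACT_QUEUED:
        case TC_ACT_STOLEN:
                *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                /* deliberate fall through: stolen also returns NULL */
        case TC_ACT_SHOT:
                return NULL;
        default:
                return &dummy_class;            /* placeholder for a class */
        }
}

int main(void)
{
        int qerr;

        /* Stolen: NULL class, but no drop is accounted. */
        return classify(TC_ACT_STOLEN, &qerr) == NULL &&
               net_xmit_drop_count(qerr) == 0 ? 0 : 1;
}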
                return ret;
        }
 
-       sch->qstats.drops++;
-       cbq_mark_toplevel(q, cl);
-       cl->qstats.drops++;
+       if (net_xmit_drop_count(ret)) {
+               sch->qstats.drops++;
+               cbq_mark_toplevel(q, cl);
+               cl->qstats.drops++;
+       }
        return ret;
 }
 
                        cbq_activate_class(cl);
                return 0;
        }
-       sch->qstats.drops++;
-       cl->qstats.drops++;
+       if (net_xmit_drop_count(ret)) {
+               sch->qstats.drops++;
+               cl->qstats.drops++;
+       }
        return ret;
 }
 
        q->rx_class = NULL;
 
        if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+               int ret;
 
                cbq_mark_toplevel(q, cl);
 
                q->rx_class = cl;
                cl->q->__parent = sch;
 
-               if (qdisc_enqueue(skb, cl->q) == 0) {
+               ret = qdisc_enqueue(skb, cl->q);
+               if (ret == NET_XMIT_SUCCESS) {
                        sch->q.qlen++;
                        sch->bstats.packets++;
                        sch->bstats.bytes += qdisc_pkt_len(skb);
                                cbq_activate_class(cl);
                        return 0;
                }
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(ret))
+                       sch->qstats.drops++;
                return 0;
        }
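Capturing ret matters here even though NET_XMIT_SUCCESS is 0 and the old == 0 test was numerically the same: a stolen packet now comes back as SUCCESS | __NET_XMIT_STOLEN, which fails the equality, so it is not booked as queued, and net_xmit_drop_count() then keeps it out of the drop counters too; the packet simply leaves this qdisc's books. A minimal check of both properties:

#include <assert.h>

#define NET_XMIT_SUCCESS        0
#define __NET_XMIT_STOLEN       0x00010000
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)

int main(void)
{
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

        assert(ret != NET_XMIT_SUCCESS);        /* not counted as queued */
        assert(net_xmit_drop_count(ret) == 0);  /* not counted as dropped */
        return 0;
}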
 
 
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        kfree_skb(skb);
-                       return NET_XMIT_SUCCESS;
+                       return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
                case TC_ACT_SHOT:
                        goto drop;
 
        err = qdisc_enqueue(skb, p->q);
        if (err != NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(err))
+                       sch->qstats.drops++;
                return err;
        }
 
 
        err = p->q->ops->requeue(skb, p->q);
        if (err != NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(err))
+                       sch->qstats.drops++;
                return err;
        }
 
 
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
 
        err = qdisc_enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
-               cl->qstats.drops++;
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(err)) {
+                       cl->qstats.drops++;
+                       sch->qstats.drops++;
+               }
                return err;
        }
 
 
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
                kfree_skb(skb);
                return ret;
 #endif
-       } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
-               cl->qstats.drops++;
+       } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       cl->qstats.drops++;
+               }
                return NET_XMIT_DROP;
        } else {
                cl->bstats.packets +=
                kfree_skb(skb);
                return ret;
 #endif
-       } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+       } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
                   NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
-               cl->qstats.drops++;
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       cl->qstats.drops++;
+               }
                return NET_XMIT_DROP;
        } else
                htb_activate(q, cl);
 
                sch->q.qlen++;
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
-       } else
+       } else if (net_xmit_drop_count(ret)) {
                sch->qstats.drops++;
+       }
 
        pr_debug("netem: enqueue ret %d\n", ret);
        return ret;
 
                switch (err) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
-       sch->qstats.drops++;
+       if (net_xmit_drop_count(ret))
+               sch->qstats.drops++;
        return ret;
 }
 
                sch->qstats.requeues++;
                return 0;
        }
-       sch->qstats.drops++;
+       if (net_xmit_drop_count(ret))
+               sch->qstats.drops++;
        return NET_XMIT_DROP;
 }
 
 
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
                sch->q.qlen++;
-       } else {
+       } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
                sch->qstats.drops++;
        }
 
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return 0;
                }
 
 
        ret = qdisc_enqueue(skb, q->qdisc);
        if (ret != 0) {
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(ret))
+                       sch->qstats.drops++;
                return ret;
        }