@@ ... @@ struct gred_sched
 {
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
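+       /* TC_RED_* flags from userspace, e.g. TC_RED_ECN */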
+       u32             red_flags;
        u32             DPs;
        u32             def;
        struct red_parms wred_set;
@@ ... @@ static inline void gred_store_wred_set(struct gred_sched *table,
        table->wred_set.qavg = q->parms.qavg;
 }
 
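+/* Nonzero when ECN marking was requested via the TC_RED_ECN flag. */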
+static inline int gred_use_ecn(struct gred_sched *t)
+{
+       return t->red_flags & TC_RED_ECN;
+}
+
 static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
        struct gred_sched_data *q=NULL;
 
@@ ... @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                case RED_PROB_MARK:
                        sch->qstats.overlimits++;
-                       q->stats.prob_drop++;
-                       goto congestion_drop;
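+                       /*
+                        * If ECN marking is enabled, try to CE-mark the
+                        * packet instead of dropping it. INET_ECN_set_ce()
+                        * fails for packets that are not ECN capable; those
+                        * are still dropped.
+                        */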
+                       if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+                               q->stats.prob_drop++;
+                               goto congestion_drop;
+                       }
+
+                       q->stats.prob_mark++;
+                       break;
 
                case RED_HARD_MARK:
                        sch->qstats.overlimits++;
-                       q->stats.forced_drop++;
-                       goto congestion_drop;
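+                       /* As above, prefer CE marking to a forced drop. */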
+                       if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+                               q->stats.forced_drop++;
+                               goto congestion_drop;
+                       }
+                       q->stats.forced_mark++;
+                       break;
        }
 
        if (q->backlog + skb->len <= q->limit) {
@@ ... @@ static int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
        sch_tree_lock(sch);
        table->DPs = sopt->DPs;
        table->def = sopt->def_DP;
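+       /* Remember the RED flags (e.g. TC_RED_ECN) for enqueue time. */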
+       table->red_flags = sopt->flags;
 
        /*
         * Every entry point to GRED is synchronized with the above code
@@ ... @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
                .DPs    = table->DPs,
                .def_DP = table->def,
                .grio   = gred_rio_mode(table),
+               .flags  = table->red_flags,
        };
 
        opts = RTA_NEST(skb, TCA_OPTIONS);