        SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
        SCHED_FEAT_WAKEUP_PREEMPT       = 2,
        SCHED_FEAT_START_DEBIT          = 4,
-       SCHED_FEAT_TREE_AVG             = 8,
-       SCHED_FEAT_APPROX_AVG           = 16,
-       SCHED_FEAT_HRTICK               = 32,
-       SCHED_FEAT_DOUBLE_TICK          = 64,
+       SCHED_FEAT_HRTICK               = 8,
+       SCHED_FEAT_DOUBLE_TICK          = 16,
 };
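
With TREE_AVG and APPROX_AVG removed, the remaining feature bits are
renumbered so the mask stays dense (HRTICK moves from 32 to 8,
DOUBLE_TICK from 64 to 16). For context, these flags are consumed
through a token-pasting accessor; a minimal sketch, assuming the
kernel/sched.c definition of this era:

	#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

so sched_feat(HRTICK) tests the 0x08 bit after this patch.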
 
 const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
                SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
                SCHED_FEAT_START_DEBIT          * 1 |
-               SCHED_FEAT_TREE_AVG             * 0 |
-               SCHED_FEAT_APPROX_AVG           * 0 |
                SCHED_FEAT_HRTICK               * 1 |
                SCHED_FEAT_DOUBLE_TICK          * 0;
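
Multiplying each flag by 0 or 1 is how the default mask is spelled out:
with DOUBLE_TICK disabled, the value after this patch is
1 + 2 + 4 + 8 = 15 (it was 1 + 2 + 4 + 32 = 39 with the old bit
positions). The const_debug qualifier follows the usual idiom, sketched
below on the assumption that it matches the surrounding kernel/sched.c:

	#ifdef CONFIG_SCHED_DEBUG
	# define const_debug __read_mostly	/* mask stays runtime-tunable */
	#else
	# define const_debug static const	/* folds to a compile-time constant */
	#endif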
 
 
        return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-       return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        return __sched_vslice(cfs_rq->load.weight + se->load.weight,
                        cfs_rq->nr_running + 1);
 }
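
The removed sched_vslice() is left without callers by this patch: its
only user was the APPROX_AVG branch deleted from place_entity() in the
next hunk. sched_vslice_add() survives because enqueue-time callers
need the slice computed as if the incoming entity were already queued,
hence the "+ se->load.weight" and "nr_running + 1". For reference, a
sketch of the shared helper whose tail appears above (reconstructed
from the kernel of this era, shown only for context):

	static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
	{
		u64 vslice = __sched_period(nr_running);

		vslice *= NICE_0_LOAD;
		do_div(vslice, rq_weight);

		return vslice;
	}
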
        } else
                vruntime = cfs_rq->min_vruntime;
 
-       if (sched_feat(TREE_AVG)) {
-               struct sched_entity *last = __pick_last_entity(cfs_rq);
-               if (last) {
-                       vruntime += last->vruntime;
-                       vruntime >>= 1;
-               }
-       } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-               vruntime += sched_vslice(cfs_rq)/2;
-
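
For the record, the deleted heuristics computed a starting vruntime for
a newly placed entity: TREE_AVG averaged min_vruntime with the
rightmost entity's vruntime (the add-then-shift-right is a halving),
while APPROX_AVG approximated that midpoint by adding half of one
virtual slice. A worked example of the TREE_AVG arithmetic, with
illustrative values only:

	u64 vruntime = cfs_rq->min_vruntime;	/* say, 1000 */
	vruntime += last->vruntime;		/* + 3000 -> 4000 */
	vruntime >>= 1;				/* / 2   -> 2000, the midpoint */
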
        /*
         * The 'current' period is already promised to the current tasks,
         * however the extra weight of the new task will slow them down a