struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
 +      struct list_head *queue = array->queue + rt_se_prio(rt_se);
  
-       if (group_rq && rt_rq_throttled(group_rq))
+       /*
+        * Don't enqueue the group if it's throttled, or when it's empty.
+        * The latter is a consequence of the former when a child group
+        * gets throttled and the current group doesn't have any other
+        * active members.
+        */
+       if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;
  
 -      list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
 +      if (rt_se->nr_cpus_allowed == 1)
 +              list_add(&rt_se->run_list, queue);
 +      else
 +              list_add_tail(&rt_se->run_list, queue);
 +
        __set_bit(rt_se_prio(rt_se), array->bitmap);
  
        inc_rt_tasks(rt_se, rt_rq);
  void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
  {
        struct rt_prio_array *array = &rt_rq->active;
+       struct list_head *queue = array->queue + rt_se_prio(rt_se);
  
-       list_del_init(&rt_se->run_list);
-       list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
 -      if (on_rt_rq(rt_se))
 -              list_move_tail(&rt_se->run_list, queue);
++      if (on_rt_rq(rt_se)) {
++              list_del_init(&rt_se->run_list);
++              list_add_tail(&rt_se->run_list,
++                            array->queue + rt_se_prio(rt_se));
++      }
  }
  
  static void requeue_task_rt(struct rq *rq, struct task_struct *p)