 const struct spu_priv1_ops *spu_priv1_ops;
 
-static struct list_head spu_list[MAX_NUMNODES];
 static LIST_HEAD(spu_full_list);
 static DEFINE_MUTEX(spu_mutex);
 static DEFINE_SPINLOCK(spu_list_lock);
        struct spu *spu = NULL;
 
        mutex_lock(&spu_mutex);
-       if (!list_empty(&spu_list[node])) {
-               spu = list_entry(spu_list[node].next, struct spu, list);
+       if (!list_empty(&cbe_spu_info[node].free_spus)) {
+               spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
+                                                                       list);
                list_del_init(&spu->list);
                pr_debug("Got SPU %d %d\n", spu->number, spu->node);
        }
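
With the per-node array gone, spu_alloc_node() now pops from cbe_spu_info[node].free_spus instead of spu_list[node]. The node-agnostic allocator is untouched by this hunk; for reference, it simply walks the nodes and leaves the per-node details to spu_alloc_node(), roughly as below (a sketch of the surrounding code, not part of the patch):

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}
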
 void spu_free(struct spu *spu)
 {
        mutex_lock(&spu_mutex);
-       list_add_tail(&spu->list, &spu_list[spu->node]);
+       list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
        mutex_unlock(&spu_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_free);
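
spu_free() is the mirror image: the SPU goes back onto its node's free list under the same spu_mutex, so callers need no locking of their own. A hypothetical caller (use_one_spu() and its error value are illustrative, not from the patch) would look like:

static int use_one_spu(int node)
{
	struct spu *spu = spu_alloc_node(node);

	if (!spu)
		return -ENODEV;
	/* ... load and run code on the SPU ... */
	spu_free(spu);	/* back onto cbe_spu_info[node].free_spus */
	return 0;
}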
 
        mutex_lock(&spu_mutex);
        spin_lock_irqsave(&spu_list_lock, flags);
-       list_add(&spu->list, &spu_list[spu->node]);
+       list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
+       list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
+       cbe_spu_info[spu->node].n_spus++;
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_list_lock, flags);
        mutex_unlock(&spu_mutex);
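
At creation time each SPU is now threaded onto three lists: the node's free list (via spu->list), the node's complete list (via the new spu->cbe_list), and the global spu_full_list, with n_spus counting SPUs per node. Walking every SPU on a node then looks roughly like this (an illustrative sketch using the names introduced here; it assumes it runs in spu_base.c, where spu_list_lock is visible):

	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
		pr_debug("SPU %d on node %d\n", spu->number, spu->node);
	spin_unlock_irqrestore(&spu_list_lock, flags);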
 
 static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
 
+struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
+EXPORT_SYMBOL_GPL(cbe_spu_info);
+
 static int __init init_spu_base(void)
 {
        int i, ret = 0;
 
-       for (i = 0; i < MAX_NUMNODES; i++)
-               INIT_LIST_HEAD(&spu_list[i]);
+       for (i = 0; i < MAX_NUMNODES; i++) {
+               INIT_LIST_HEAD(&cbe_spu_info[i].spus);
+               INIT_LIST_HEAD(&cbe_spu_info[i].free_spus);
+       }
 
        if (!spu_management_ops)
                goto out;
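
init_spu_base() now initializes both per-node list heads for every possible node before any SPUs are probed. Once the lists are populated, a total over all nodes is a plain sum of the new n_spus counters; a hypothetical helper (not in the patch) could be:

static int spu_count_all(void)
{
	int node, n = 0;

	for (node = 0; node < MAX_NUMNODES; node++)
		n += cbe_spu_info[node].n_spus;
	return n;
}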
 
                 spu->number, spu->node);
        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
+       if (ctx->flags & SPU_CREATE_NOSCHED)
+               atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
+
        ctx->stats.slb_flt_base = spu->stats.slb_flt;
        ctx->stats.class2_intr_base = spu->stats.class2_intr;
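
Binding a context created with SPU_CREATE_NOSCHED pins its SPU, so the node's reserved_spus counter goes up here and comes back down in the unbind path below. A scheduler could subtract the pinned SPUs when sizing a node's schedulable pool; node_schedulable_spus() is a hypothetical illustration, not part of the patch:

static int node_schedulable_spus(int node)
{
	return cbe_spu_info[node].n_spus -
	       atomic_read(&cbe_spu_info[node].reserved_spus);
}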
 
                 spu->pid, spu->number, spu->node);
        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
+       if (spu->ctx->flags & SPU_CREATE_NOSCHED)
+               atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
 
        struct spu_problem __iomem *problem;
        struct spu_priv2 __iomem *priv2;
        struct list_head list;
+       struct list_head cbe_list;
        struct list_head sched_list;
        struct list_head full_list;
        int number;
        } stats;
 };
 
+struct cbe_spu_info {
+       struct list_head spus;
+       struct list_head free_spus;
+       int n_spus;
+       atomic_t reserved_spus;
+};
+
+extern struct cbe_spu_info cbe_spu_info[];
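
struct cbe_spu_info bundles everything the old spu_list array tracked plus the new bookkeeping: spus links every SPU on the node (through spu->cbe_list), free_spus the unallocated ones (through spu->list), n_spus holds the total, and reserved_spus counts NOSCHED-pinned SPUs. Since the array is exported GPL-only, a GPL module could read it directly; the init function below is a hypothetical demo, not part of the patch:

static int __init spu_info_demo_init(void)
{
	int node;

	for (node = 0; node < MAX_NUMNODES; node++)
		if (cbe_spu_info[node].n_spus)
			printk(KERN_INFO "node %d: %d SPUs, %d reserved\n",
			       node, cbe_spu_info[node].n_spus,
			       atomic_read(&cbe_spu_info[node].reserved_spus));
	return 0;
}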
+
 struct spu *spu_alloc(void);
 struct spu *spu_alloc_node(int node);
 void spu_free(struct spu *spu);