#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
+#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
-#include "spu_priv1_mmio.h"
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);
struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);
+/*
+ * The spufs fault-handling code needs to call force_sig_info to raise signals
+ * on DMA errors. Export it here to avoid general kernel-wide access to this
+ * function
+ */
+EXPORT_SYMBOL_GPL(force_sig_info);
+
/*
* Protects cbe_spu_info and spu->number.
*/
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
+struct spu_slb {
+ u64 esid, vsid;
+};
+
void spu_invalidate_slbs(struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
}
EXPORT_SYMBOL_GPL(spu_associate_mm);
-static int __spu_trap_invalid_dma(struct spu *spu)
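+/* report whether the MMU is configured with a 64K page size */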
+int spu_64k_pages_available(void)
{
- pr_debug("%s\n", __FUNCTION__);
- spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
- return 0;
+ return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
+EXPORT_SYMBOL_GPL(spu_64k_pages_available);
-static int __spu_trap_dma_align(struct spu *spu)
+static void spu_restart_dma(struct spu *spu)
{
- pr_debug("%s\n", __FUNCTION__);
- spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
- return 0;
-}
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
-static int __spu_trap_error(struct spu *spu)
-{
- pr_debug("%s\n", __FUNCTION__);
- spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
- return 0;
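+ /* don't restart the MFC DMA queue while a context switch is pending */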
+ if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
-static void spu_restart_dma(struct spu *spu)
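+/* write a single entry into the SPU's SLB array via the priv2 registers */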
+static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
- out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+ pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
+ __func__, slbe, slb->vsid, slb->esid);
+
+ out_be64(&priv2->slb_index_W, slbe);
+ out_be64(&priv2->slb_vsid_RW, slb->vsid);
+ out_be64(&priv2->slb_esid_RW, slb->esid);
}
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
- struct spu_priv2 __iomem *priv2 = spu->priv2;
struct mm_struct *mm = spu->mm;
- u64 esid, vsid, llp;
+ struct spu_slb slb;
int psize;
pr_debug("%s\n", __FUNCTION__);
printk("%s: invalid access during switch!\n", __func__);
return 1;
}
- esid = (ea & ESID_MASK) | SLB_ESID_V;
+ slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
switch(REGION_ID(ea)) {
case USER_REGION_ID:
#else
psize = mm->context.user_psize;
#endif
- vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
- SLB_VSID_USER;
+ slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
+ << SLB_VSID_SHIFT) | SLB_VSID_USER;
break;
case VMALLOC_REGION_ID:
if (ea < VMALLOC_END)
psize = mmu_vmalloc_psize;
else
psize = mmu_io_psize;
- vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
- SLB_VSID_KERNEL;
+ slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+ << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
break;
case KERNEL_REGION_ID:
psize = mmu_linear_psize;
- vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
- SLB_VSID_KERNEL;
+ slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+ << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
break;
default:
/* Future: support kernel segments so that drivers
pr_debug("invalid region access at %016lx\n", ea);
return 1;
}
- llp = mmu_psize_defs[psize].sllp;
+ slb.vsid |= mmu_psize_defs[psize].sllp;
- out_be64(&priv2->slb_index_W, spu->slb_replace);
- out_be64(&priv2->slb_vsid_RW, vsid | llp);
- out_be64(&priv2->slb_esid_RW, esid);
+ spu_load_slb(spu, spu->slb_replace, &slb);
spu->slb_replace++;
if (spu->slb_replace >= 8)
return 1;
}
+ spu->class_0_pending = 0;
spu->dar = ea;
spu->dsisr = dsisr;
- mb();
+
spu->stop_callback(spu);
+
return 0;
}
-static irqreturn_t
-spu_irq_class_0(int irq, void *data)
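+/* build a kernel SLB entry for @addr, using the linear page size for the
+ * kernel region and the virtual page size otherwise */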
+static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
- struct spu *spu;
+ unsigned long ea = (unsigned long)addr;
+ u64 llp;
- spu = data;
- spu->class_0_pending = 1;
- spu->stop_callback(spu);
+ if (REGION_ID(ea) == KERNEL_REGION_ID)
+ llp = mmu_psize_defs[mmu_linear_psize].sllp;
+ else
+ llp = mmu_psize_defs[mmu_virtual_psize].sllp;
- return IRQ_HANDLED;
+ slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+ SLB_VSID_KERNEL | llp;
+ slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}
-int
-spu_irq_class_0_bottom(struct spu *spu)
+/**
+ * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
+ * address @new_addr is present.
+ */
+static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+ void *new_addr)
{
- unsigned long stat, mask;
- unsigned long flags;
+ unsigned long ea = (unsigned long)new_addr;
+ int i;
- spu->class_0_pending = 0;
+ for (i = 0; i < nr_slbs; i++)
+ if (!((slbs[i].esid ^ ea) & ESID_MASK))
+ return 1;
- spin_lock_irqsave(&spu->register_lock, flags);
- mask = spu_int_mask_get(spu, 0);
- stat = spu_int_stat_get(spu, 0);
+ return 0;
+}
- stat &= mask;
+/**
+ * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
+ * need to map both the context save area, and the save/restore code.
+ *
+ * Because the lscsa and code may cross segment boundaries, we check to see
+ * if mappings are required for the start and end of each range. We currently
+ * assume that the mappings are smaller than one segment - if not, something
+ * is seriously wrong.
+ */
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+ void *code, int code_size)
+{
+ struct spu_slb slbs[4];
+ int i, nr_slbs = 0;
+ /* start and end addresses of both mappings */
+ void *addrs[] = {
+ lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
+ code, code + code_size - 1
+ };
- if (stat & 1) /* invalid DMA alignment */
- __spu_trap_dma_align(spu);
+ /* check the set of addresses, and create a new entry in the slbs array
+ * if there isn't already an SLB for that address */
+ for (i = 0; i < ARRAY_SIZE(addrs); i++) {
+ if (__slb_present(slbs, nr_slbs, addrs[i]))
+ continue;
+
+ __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
+ nr_slbs++;
+ }
+
+ /* Add the set of SLBs */
+ for (i = 0; i < nr_slbs; i++)
+ spu_load_slb(spu, i, &slbs[i]);
+}
+EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
+
+static irqreturn_t
+spu_irq_class_0(int irq, void *data)
+{
+ struct spu *spu;
+ unsigned long stat, mask;
+
+ spu = data;
+
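+ /* accumulate the class 0 status and save the fault registers for the
+ * stop callback */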
+ spin_lock(&spu->register_lock);
+ mask = spu_int_mask_get(spu, 0);
+ stat = spu_int_stat_get(spu, 0) & mask;
- if (stat & 2) /* invalid MFC DMA */
- __spu_trap_invalid_dma(spu);
+ spu->class_0_pending |= stat;
+ spu->dsisr = spu_mfc_dsisr_get(spu);
+ spu->dar = spu_mfc_dar_get(spu);
+ spin_unlock(&spu->register_lock);
- if (stat & 4) /* error on SPU */
- __spu_trap_error(spu);
+ spu->stop_callback(spu);
spu_int_stat_clear(spu, 0, stat);
- spin_unlock_irqrestore(&spu->register_lock, flags);
- return (stat & 0x7) ? -EIO : 0;
+ return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
static irqreturn_t
spu_irq_class_1(int irq, void *data)
stat = spu_int_stat_get(spu, 1) & mask;
dar = spu_mfc_dar_get(spu);
dsisr = spu_mfc_dsisr_get(spu);
- if (stat & 2) /* mapping fault */
+ if (stat & CLASS1_STORAGE_FAULT_INTR)
spu_mfc_dsisr_set(spu, 0ul);
spu_int_stat_clear(spu, 1, stat);
spin_unlock(&spu->register_lock);
pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
dar, dsisr);
- if (stat & 1) /* segment fault */
+ if (stat & CLASS1_SEGMENT_FAULT_INTR)
__spu_trap_data_seg(spu, dar);
- if (stat & 2) { /* mapping fault */
+ if (stat & CLASS1_STORAGE_FAULT_INTR)
__spu_trap_data_map(spu, dar, dsisr);
- }
- if (stat & 4) /* ls compare & suspend on get */
+ if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
;
- if (stat & 8) /* ls compare & suspend on put */
+ if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
;
return stat ? IRQ_HANDLED : IRQ_NONE;
struct spu *spu;
unsigned long stat;
unsigned long mask;
+ const int mailbox_intrs =
+ CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
spu = data;
spin_lock(&spu->register_lock);
mask = spu_int_mask_get(spu, 2);
/* ignore interrupts we're not waiting for */
stat &= mask;
- /*
- * mailbox interrupts (0x1 and 0x10) are level triggered.
- * mask them now before acknowledging.
- */
- if (stat & 0x11)
- spu_int_mask_and(spu, 2, ~(stat & 0x11));
+
+ /* mailbox interrupts are level triggered. mask them now before
+ * acknowledging */
+ if (stat & mailbox_intrs)
+ spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
/* acknowledge all interrupts before the callbacks */
spu_int_stat_clear(spu, 2, stat);
spin_unlock(&spu->register_lock);
pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
- if (stat & 1) /* PPC core mailbox */
+ if (stat & CLASS2_MAILBOX_INTR)
spu->ibox_callback(spu);
- if (stat & 2) /* SPU stop-and-signal */
+ if (stat & CLASS2_SPU_STOP_INTR)
spu->stop_callback(spu);
- if (stat & 4) /* SPU halted */
+ if (stat & CLASS2_SPU_HALT_INTR)
spu->stop_callback(spu);
- if (stat & 8) /* DMA tag group complete */
+ if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
spu->mfc_callback(spu);
- if (stat & 0x10) /* SPU mailbox threshold */
+ if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
spu->wbox_callback(spu);
spu->stats.class2_intr++;
return 0;
}
-struct sysdev_class spu_sysdev_class = {
+static struct sysdev_class spu_sysdev_class = {
set_kset_name("spu"),
.shutdown = spu_shutdown,
};
int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
struct spu *spu;
+ int rc = 0;
mutex_lock(&spu_full_list_mutex);
- list_for_each_entry(spu, &spu_full_list, full_list)
- sysfs_create_group(&spu->sysdev.kobj, attrs);
+ list_for_each_entry(spu, &spu_full_list, full_list) {
+ rc = sysfs_create_group(&spu->sysdev.kobj, attrs);
+
+ /* we're in trouble here, but try unwinding anyway */
+ if (rc) {
+ printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
+ __func__, attrs->name);
+
+ list_for_each_entry_continue_reverse(spu,
+ &spu_full_list, full_list)
+ sysfs_remove_group(&spu->sysdev.kobj, attrs);
+ break;
+ }
+ }
+
mutex_unlock(&spu_full_list_mutex);
- return 0;
+ return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
-/* Hardcoded affinity idxs for QS20 */
-#define SPES_PER_BE 8
-static int QS20_reg_idxs[SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
-static int QS20_reg_memory[SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
-
-static struct spu *spu_lookup_reg(int node, u32 reg)
-{
- struct spu *spu;
-
- list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
- if (*(u32 *)get_property(spu_devnode(spu), "reg", NULL) == reg)
- return spu;
- }
- return NULL;
-}
-
-static void init_aff_QS20_harcoded(void)
-{
- int node, i;
- struct spu *last_spu, *spu;
- u32 reg;
-
- for (node = 0; node < MAX_NUMNODES; node++) {
- last_spu = NULL;
- for (i = 0; i < SPES_PER_BE; i++) {
- reg = QS20_reg_idxs[i];
- spu = spu_lookup_reg(node, reg);
- if (!spu)
- continue;
- spu->has_mem_affinity = QS20_reg_memory[reg];
- if (last_spu)
- list_add_tail(&spu->aff_list,
- &last_spu->aff_list);
- last_spu = spu;
- }
- }
-}
-
-static int of_has_vicinity(void)
-{
- struct spu* spu;
-
- spu = list_entry(cbe_spu_info[0].spus.next, struct spu, cbe_list);
- return of_find_property(spu_devnode(spu), "vicinity", NULL) != NULL;
-}
-
-static struct spu *aff_devnode_spu(int cbe, struct device_node *dn)
-{
- struct spu *spu;
-
- list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
- if (spu_devnode(spu) == dn)
- return spu;
- return NULL;
-}
-
-static struct spu *
-aff_node_next_to(int cbe, struct device_node *target, struct device_node *avoid)
-{
- struct spu *spu;
- const phandle *vic_handles;
- int lenp, i;
-
- list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
- if (spu_devnode(spu) == avoid)
- continue;
- vic_handles = get_property(spu_devnode(spu), "vicinity", &lenp);
- for (i=0; i < (lenp / sizeof(phandle)); i++) {
- if (vic_handles[i] == target->linux_phandle)
- return spu;
- }
- }
- return NULL;
-}
-
-static void init_aff_fw_vicinity_node(int cbe)
-{
- struct spu *spu, *last_spu;
- struct device_node *vic_dn, *last_spu_dn;
- phandle avoid_ph;
- const phandle *vic_handles;
- const char *name;
- int lenp, i, added, mem_aff;
-
- last_spu = list_entry(cbe_spu_info[cbe].spus.next, struct spu, cbe_list);
- avoid_ph = 0;
- for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
- last_spu_dn = spu_devnode(last_spu);
- vic_handles = get_property(last_spu_dn, "vicinity", &lenp);
-
- for (i = 0; i < (lenp / sizeof(phandle)); i++) {
- if (vic_handles[i] == avoid_ph)
- continue;
-
- vic_dn = of_find_node_by_phandle(vic_handles[i]);
- if (!vic_dn)
- continue;
-
- name = get_property(vic_dn, "name", NULL);
- if (strcmp(name, "spe") == 0) {
- spu = aff_devnode_spu(cbe, vic_dn);
- avoid_ph = last_spu_dn->linux_phandle;
- }
- else {
- mem_aff = strcmp(name, "mic-tm") == 0;
- spu = aff_node_next_to(cbe, vic_dn, last_spu_dn);
- if (!spu)
- continue;
- if (mem_aff) {
- last_spu->has_mem_affinity = 1;
- spu->has_mem_affinity = 1;
- }
- avoid_ph = vic_dn->linux_phandle;
- }
- list_add_tail(&spu->aff_list, &last_spu->aff_list);
- last_spu = spu;
- break;
- }
- }
-}
-
-static void init_aff_fw_vicinity(void)
-{
- int cbe;
-
- /* sets has_mem_affinity for each spu, as long as the
- * spu->aff_list list, linking each spu to its neighbors
- */
- for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
- init_aff_fw_vicinity_node(cbe);
-}
-
static int __init init_spu_base(void)
{
int i, ret = 0;
mutex_unlock(&spu_full_list_mutex);
spu_add_sysdev_attr(&attr_stat);
- if (of_has_vicinity()) {
- init_aff_fw_vicinity();
- } else {
- long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
- init_aff_QS20_harcoded();
- }
+ spu_init_affinity();
return 0;