pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - arch/sparc64/kernel/pci_fire.c
Merge branch 'eseries' into pxa
[linux-2.6-omap-h63xx.git] / arch / sparc64 / kernel / pci_fire.c
index 14d67fe21ab2c1a4aa8343a97352e0ab7573f0c7..d23bb6f53cdac8e09225f7a8b95c01f6d40a38be 100644 (file)
@@ -6,9 +6,12 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/irq.h>
 
 #include <asm/oplib.h>
 #include <asm/prom.h>
+#include <asm/irq.h>
 
 #include "pci_impl.h"
 
@@ -27,7 +30,7 @@
                               "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory")
 
-static void pci_fire_scan_bus(struct pci_pbm_info *pbm)
+static void __init pci_fire_scan_bus(struct pci_pbm_info *pbm)
 {
        pbm->pci_bus = pci_scan_one_pbm(pbm);
 
@@ -68,7 +71,8 @@ static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
         */
        fire_write(iommu->iommu_flushinv, ~(u64)0);
 
-       err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+       err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
+                              pbm->numa_node);
        if (err)
                return err;
 
@@ -84,6 +88,266 @@ static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
        return 0;
 }
 
+#ifdef CONFIG_PCI_MSI
+/* Layout of one Fire MSI event-queue (MSIQ) entry as written by the
+ * host bridge.  Each entry is 8 x u64 (64 bytes): word0 and word1
+ * carry the decoded message, resv[] pads the entry to its full size.
+ * The masks/shifts below extract the fields of each word.
+ */
+struct pci_msiq_entry {
+	u64		word0;
+#define MSIQ_WORD0_RESV			0x8000000000000000UL
+#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
+#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
+#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
+#define MSIQ_WORD0_LEN_SHIFT		46
+#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
+#define MSIQ_WORD0_ADDR0_SHIFT		32
+#define MSIQ_WORD0_RID			0x00000000ffff0000UL
+#define MSIQ_WORD0_RID_SHIFT		16
+#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
+#define MSIQ_WORD0_DATA0_SHIFT		0
+
+/* Values of the FMT_TYPE field's upper bits (type); see the
+ * "type = type_fmt >> 3" decode in pci_fire_dequeue_msi().
+ */
+#define MSIQ_TYPE_MSG			0x6
+#define MSIQ_TYPE_MSI32			0xb
+#define MSIQ_TYPE_MSI64			0xf
+
+	u64		word1;
+#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
+#define MSIQ_WORD1_ADDR1_SHIFT		16
+#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
+#define MSIQ_WORD1_DATA1_SHIFT		0
+
+	u64		resv[6];
+};
+
+/* All MSI registers are offset from pbm->pbm_regs */
+#define EVENT_QUEUE_BASE_ADDR_REG      0x010000UL
+#define  EVENT_QUEUE_BASE_ADDR_ALL_ONES        0xfffc000000000000UL
+
+#define EVENT_QUEUE_CONTROL_SET(EQ)    (0x011000UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
+#define  EVENT_QUEUE_CONTROL_SET_EN    0x0000100000000000UL
+
+#define EVENT_QUEUE_CONTROL_CLEAR(EQ)  (0x011200UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_CONTROL_CLEAR_OF  0x0200000000000000UL
+#define  EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
+#define  EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL
+
+#define EVENT_QUEUE_STATE(EQ)          (0x011400UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_STATE_MASK                0x0000000000000007UL
+#define  EVENT_QUEUE_STATE_IDLE                0x0000000000000001UL
+#define  EVENT_QUEUE_STATE_ACTIVE      0x0000000000000002UL
+#define  EVENT_QUEUE_STATE_ERROR       0x0000000000000004UL
+
+#define EVENT_QUEUE_TAIL(EQ)           (0x011600UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_TAIL_OFLOW                0x0200000000000000UL
+#define  EVENT_QUEUE_TAIL_VAL          0x000000000000007fUL
+
+#define EVENT_QUEUE_HEAD(EQ)           (0x011800UL + (EQ) * 0x8UL)
+#define  EVENT_QUEUE_HEAD_VAL          0x000000000000007fUL
+
+#define MSI_MAP(MSI)                   (0x020000UL + (MSI) * 0x8UL)
+#define  MSI_MAP_VALID                 0x8000000000000000UL
+#define  MSI_MAP_EQWR_N                        0x4000000000000000UL
+#define  MSI_MAP_EQNUM                 0x000000000000003fUL
+
+#define MSI_CLEAR(MSI)                 (0x028000UL + (MSI) * 0x8UL)
+#define  MSI_CLEAR_EQWR_N              0x4000000000000000UL
+
+#define IMONDO_DATA0                   0x02C000UL
+#define  IMONDO_DATA0_DATA             0xffffffffffffffc0UL
+
+#define IMONDO_DATA1                   0x02C008UL
+#define  IMONDO_DATA1_DATA             0xffffffffffffffffUL
+
+#define MSI_32BIT_ADDR                 0x034000UL
+#define  MSI_32BIT_ADDR_VAL            0x00000000ffff0000UL
+
+#define MSI_64BIT_ADDR                 0x034008UL
+#define  MSI_64BIT_ADDR_VAL            0xffffffffffff0000UL
+
+/* Read the hardware head pointer of event queue @msiqid into *@head.
+ * Always succeeds on this chip; returns 0 to satisfy the generic
+ * sparc64_msiq_ops contract.
+ */
+static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			     unsigned long *head)
+{
+	*head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
+	return 0;
+}
+
+/* Pull one MSI off event queue @msiqid at ring index *@head.
+ *
+ * Returns 1 and stores the MSI number in *@msi (advancing *@head,
+ * with wraparound at pbm->msiq_ent_count) when a valid MSI entry was
+ * consumed; returns 0 when the entry at *@head is empty (FMT_TYPE
+ * clear); returns -EINVAL for a non-MSI entry type.
+ *
+ * NOTE(review): each queue is spaced 8192 bytes apart in the in-memory
+ * block allocated by pci_fire_msiq_alloc(); with 64-byte entries that
+ * is 128 entries/queue, matching the 0x7f HEAD/TAIL masks — confirm
+ * against pbm->msiq_ent_count set by the generic MSI code.
+ */
+static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
+				unsigned long *head, unsigned long *msi)
+{
+	unsigned long type_fmt, type, msi_num;
+	struct pci_msiq_entry *base, *ep;
+
+	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
+	ep = &base[*head];
+
+	/* Empty slot: hardware has not written an entry here.  */
+	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
+		return 0;
+
+	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
+		    MSIQ_WORD0_FMT_TYPE_SHIFT);
+	type = (type_fmt >> 3);
+	if (unlikely(type != MSIQ_TYPE_MSI32 &&
+		     type != MSIQ_TYPE_MSI64))
+		return -EINVAL;
+
+	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
+			  MSIQ_WORD0_DATA0_SHIFT);
+
+	/* Re-arm this MSI in the hardware so it can fire again.  */
+	fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
+		   MSI_CLEAR_EQWR_N);
+
+	/* Clear the entry.  */
+	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
+
+	/* Go to next entry in ring.  */
+	(*head)++;
+	if (*head >= pbm->msiq_ent_count)
+		*head = 0;
+
+	return 1;
+}
+
+/* Write back the software head pointer for event queue @msiqid,
+ * telling the hardware which entries have been consumed.  Returns 0.
+ */
+static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+			     unsigned long head)
+{
+	fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
+	return 0;
+}
+
+/* Route MSI number @msi to event queue @msiqid and mark the mapping
+ * valid.  The MSI is cleared (re-armed) before the valid bit is set
+ * so no stale pending state survives.  Returns 0.
+ *
+ * NOTE(review): @is_msi64 is accepted but never used here — the
+ * 32-bit/64-bit MSI address windows are programmed globally in
+ * pci_fire_msiq_alloc(), not per-MSI.
+ */
+static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+			      unsigned long msi, int is_msi64)
+{
+	u64 val;
+
+	/* Select the target event queue in the MSI mapping register.  */
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	val &= ~(MSI_MAP_EQNUM);
+	val |= msiqid;
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+	/* Clear any pending state for this MSI.  */
+	fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
+		   MSI_CLEAR_EQWR_N);
+
+	/* Finally enable the mapping.  */
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	val |= MSI_MAP_VALID;
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+	return 0;
+}
+
+/* Disable MSI number @msi by clearing the valid bit in its mapping
+ * register.  Returns 0.
+ *
+ * NOTE(review): @msiqid is extracted from the mapping but never used;
+ * it looks like leftover scaffolding — confirm before removing in a
+ * follow-up patch.
+ */
+static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
+{
+	unsigned long msiqid;
+	u64 val;
+
+	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+	msiqid = (val & MSI_MAP_EQNUM);
+
+	val &= ~MSI_MAP_VALID;
+
+	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+	return 0;
+}
+
+/* Allocate and program the in-memory MSI event queues for @pbm.
+ *
+ * A single zeroed 512KB physically-contiguous block backs all queues
+ * (pci_fire_dequeue_msi() indexes into it at 8192-byte strides).  The
+ * hardware is then pointed at it via EVENT_QUEUE_BASE_ADDR_REG, the
+ * interrupt mondo data and the 32/64-bit MSI address windows are set
+ * up, and every queue's head/tail is reset to zero.
+ *
+ * Returns 0 on success, -ENOMEM if the page allocation fails.
+ */
+static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
+{
+	unsigned long pages, order, i;
+
+	order = get_order(512 * 1024);
+	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
+	if (pages == 0UL) {
+		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
+		       order);
+		return -ENOMEM;
+	}
+	memset((char *)pages, 0, PAGE_SIZE << order);
+	pbm->msi_queues = (void *) pages;
+
+	/* The ALL_ONES bits must be set alongside the physical base.  */
+	fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
+		   (EVENT_QUEUE_BASE_ADDR_ALL_ONES |
+		    __pa(pbm->msi_queues)));
+
+	/* Interrupt mondo payload encodes the port id in DATA0.  */
+	fire_write(pbm->pbm_regs + IMONDO_DATA0,
+		   pbm->portid << 6);
+	fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);
+
+	fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
+		   pbm->msi32_start);
+	fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
+		   pbm->msi64_start);
+
+	/* Start every event queue with empty head == tail.  */
+	for (i = 0; i < pbm->msiq_num; i++) {
+		fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
+		fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
+	}
+
+	return 0;
+}
+
+/* Release the event-queue block allocated by pci_fire_msiq_alloc()
+ * (same 512KB order) and clear the pointer so it cannot be reused.
+ */
+static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
+{
+	unsigned long pages, order;
+
+	order = get_order(512 * 1024);
+	pages = (unsigned long) pbm->msi_queues;
+
+	free_pages(pages, order);
+
+	pbm->msi_queues = NULL;
+}
+
+/* Build a virtual IRQ for event queue @msiqid's device interrupt
+ * number @devino and enable the queue.
+ *
+ * The IMAP register for @devino gets its enable bit (bit 63) and an
+ * interrupt-controller select set, the real INO is computed from the
+ * port id, and build_irq() turns that into a virt irq.
+ *
+ * Returns the (positive) virt irq on success, -ENOMEM on failure.
+ */
+static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
+				   unsigned long msiqid,
+				   unsigned long devino)
+{
+	unsigned long cregs = (unsigned long) pbm->pbm_regs;
+	unsigned long imap_reg, iclr_reg, int_ctrlr;
+	unsigned int virt_irq;
+	int fixup;
+	u64 val;
+
+	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
+	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
+
+	/* XXX iterate amongst the 4 IRQ controllers XXX */
+	int_ctrlr = (1UL << 6);
+
+	val = fire_read(imap_reg);
+	val |= (1UL << 63) | int_ctrlr;
+	fire_write(imap_reg, val);
+
+	/* build_irq() wants the INO without the controller bits.  */
+	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
+
+	virt_irq = build_irq(fixup, iclr_reg, imap_reg);
+	if (!virt_irq)
+		return -ENOMEM;
+
+	/* Turn the event queue on last, once the irq path exists.  */
+	fire_write(pbm->pbm_regs +
+		   EVENT_QUEUE_CONTROL_SET(msiqid),
+		   EVENT_QUEUE_CONTROL_SET_EN);
+
+	return virt_irq;
+}
+
+/* Fire-specific backend for the generic sparc64 MSI queue layer.  */
+static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
+	.get_head	=	pci_fire_get_head,
+	.dequeue_msi	=	pci_fire_dequeue_msi,
+	.set_head	=	pci_fire_set_head,
+	.msi_setup	=	pci_fire_msi_setup,
+	.msi_teardown	=	pci_fire_msi_teardown,
+	.msiq_alloc	=	pci_fire_msiq_alloc,
+	.msiq_free	=	pci_fire_msiq_free,
+	.msiq_build_irq	=	pci_fire_msiq_build_irq,
+};
+
+/* Register this PBM's MSI ops with the generic layer; compiles to a
+ * no-op stub when CONFIG_PCI_MSI is disabled.
+ */
+static void pci_fire_msi_init(struct pci_pbm_info *pbm)
+{
+	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
+}
+#else /* CONFIG_PCI_MSI */
+static void pci_fire_msi_init(struct pci_pbm_info *pbm)
+{
+}
+#endif /* !(CONFIG_PCI_MSI) */
+
 /* Based at pbm->controller_regs */
 #define FIRE_PARITY_CONTROL    0x470010UL
 #define  FIRE_PARITY_ENAB      0x8000000000000000UL
@@ -171,11 +435,12 @@ static void pci_fire_hw_init(struct pci_pbm_info *pbm)
        fire_write(pbm->pbm_regs + FIRE_PEC_IENAB, ~(u64)0);
 }
 
-static int pci_fire_pbm_init(struct pci_controller_info *p,
-                            struct device_node *dp, u32 portid)
+static int __init pci_fire_pbm_init(struct pci_controller_info *p,
+                                   struct device_node *dp, u32 portid)
 {
        const struct linux_prom64_registers *regs;
        struct pci_pbm_info *pbm;
+       int err;
 
        if ((portid & 1) == 0)
                pbm = &p->pbm_A;
@@ -185,6 +450,8 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;
 
+       pbm->numa_node = -1;
+
        pbm->scan_bus = pci_fire_scan_bus;
        pbm->pci_ops = &sun4u_pci_ops;
        pbm->config_space_reg_bits = 12;
@@ -208,7 +475,13 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
 
        pci_fire_hw_init(pbm);
 
-       return pci_fire_pbm_iommu_init(pbm);
+       err = pci_fire_pbm_iommu_init(pbm);
+       if (err)
+               return err;
+
+       pci_fire_msi_init(pbm);
+
+       return 0;
 }
 
 static inline int portid_compare(u32 x, u32 y)
@@ -218,7 +491,7 @@ static inline int portid_compare(u32 x, u32 y)
        return 0;
 }
 
-void fire_pci_init(struct device_node *dp, const char *model_name)
+void __init fire_pci_init(struct device_node *dp, const char *model_name)
 {
        struct pci_controller_info *p;
        u32 portid = of_getintprop_default(dp, "portid", 0xff);
@@ -249,13 +522,6 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
 
        p->pbm_B.iommu = iommu;
 
-       /* XXX MSI support XXX */
-
-       /* Like PSYCHO and SCHIZO we have a 2GB aligned area
-        * for memory space.
-        */
-       pci_memspace_mask = 0x7fffffffUL;
-
        if (pci_fire_pbm_init(p, dp, portid))
                goto fatal_memory_error;