diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ebd1a09f32016b870ccb758dac93f1ade566f866..e9596cd0cdab21595f7c5f85d7da7d2952beb5d6 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -59,6 +59,7 @@
 #include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
 #include <asm/unistd.h>
 #include <asm/hpsim.h>
 
@@ -176,6 +177,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
        return 0;
 }
 
+/*
+ * Similar to "filter_rsvd_memory()", but the reserved memory ranges
+ * are not filtered out.
+ */
+int __init
+filter_memory(unsigned long start, unsigned long end, void *arg)
+{
+       void (*func)(unsigned long, unsigned long, int);
+
+#if IGNORE_PFN0
+       if (start == PAGE_OFFSET) {
+               printk(KERN_WARNING "warning: skipping physical page 0\n");
+               start += PAGE_SIZE;
+               if (start >= end)
+                       return 0;
+       }
+#endif
+       func = arg;
+       if (start < end)
+               call_pernode_memory(__pa(start), end - start, func);
+       return 0;
+}
+
 static void __init
 sort_regions (struct rsvd_region *rsvd_region, int max)
 {
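
The callback that filter_memory() forwards to call_pernode_memory() arrives
through the opaque void *arg and is cast back to its real function type before
use. A minimal standalone sketch of that cast-and-dispatch pattern, with
illustrative names (walk_ranges, account_range) that are not part of this
patch; it relies on the usual GCC/Linux assumption that a function pointer
round-trips through void *:

#include <stdio.h>

/* Same shape as the callback filter_memory() hands on per node. */
typedef void (*range_func_t)(unsigned long start, unsigned long len, int node);

static void account_range(unsigned long start, unsigned long len, int node)
{
        printf("node %d: 0x%lx + 0x%lx\n", node, start, len);
}

/* Walker that, like filter_memory(), receives its callback as void *arg. */
static int walk_ranges(unsigned long start, unsigned long end, void *arg)
{
        range_func_t func = (range_func_t)arg;  /* recover the real type */

        if (start < end)
                func(start, end - start, 0);
        return 0;
}

int main(void)
{
        return walk_ranges(0x1000, 0x9000, (void *)account_range);
}
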
@@ -215,6 +239,25 @@ __initcall(register_memory);
 
 
 #ifdef CONFIG_KEXEC
+
+/*
+ * This function checks whether the reserved crashkernel region is allowed on
+ * this particular IA64 machine flavour. Machines without an IO TLB use
+ * swiotlb and need some memory below 4 GB (i.e. in the 32-bit area); see the
+ * implementation in lib/swiotlb.c. The hpzx1 architecture has an IO TLB but
+ * cannot use it in the kdump case; see the comment in sba_init() in
+ * sba_iommu.c.
+ *
+ * So the only machvec that really supports loading the kdump kernel
+ * over 4 GB is "sn2".
+ */
+static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
+{
+       if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
+               return 1;
+       else
+               return pbase < (1UL << 32);
+}
+
 static void __init setup_crashkernel(unsigned long total, int *n)
 {
        unsigned long long base = 0, size = 0;
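
On anything other than "sn2" or "uv", check_crashkernel_memory() only accepts
a reservation whose physical base lies below 4 GB, i.e. below 1UL << 32, so
that swiotlb still has 32-bit addressable memory to work with. A small sketch
of that boundary test, assuming a 64-bit unsigned long as on ia64 (the helper
name fits_swiotlb_window is illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define FOUR_GB (1UL << 32)     /* 0x100000000; needs a 64-bit unsigned long */

/* Mirrors the "pbase < (1UL << 32)" test from check_crashkernel_memory(). */
static bool fits_swiotlb_window(unsigned long pbase)
{
        return pbase < FOUR_GB;
}

int main(void)
{
        printf("%d\n", fits_swiotlb_window(0xc0000000UL));  /* 3 GB -> 1 */
        printf("%d\n", fits_swiotlb_window(0x140000000UL)); /* 5 GB -> 0 */
        return 0;
}
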
@@ -228,6 +271,16 @@ static void __init setup_crashkernel(unsigned long total, int *n)
                        base = kdump_find_rsvd_region(size,
                                        rsvd_region, *n);
                }
+
+               if (!check_crashkernel_memory(base, size)) {
+                       pr_warning("crashkernel: There would be kdump memory "
+                               "at %ld GB but this is unusable because it "
+                               "must\nbe below 4 GB. Change the memory "
+                               "configuration of the machine.\n",
+                               (unsigned long)(base >> 30));
+                       return;
+               }
+
                if (base != ~0UL) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                        "for crashkernel (System RAM: %ldMB)\n",
@@ -493,6 +546,8 @@ setup_arch (char **cmdline_p)
        acpi_table_init();
 # ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
+       per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
+               32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
 # endif
 #else
 # ifdef CONFIG_SMP
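
The value handed to per_cpu_scan_finalize() is the number of CPUs present in
early_cpu_possible_map, falling back to 32 reserved slots when the early ACPI
NUMA scan found none. A tiny sketch of that fallback, with mask_weight() and
cpus_to_reserve() as illustrative stand-ins for cpus_weight() and the inline
expression in the patch:

#include <stdio.h>

/* Illustrative stand-in for cpus_weight(): count the set bits in a mask. */
static int mask_weight(unsigned long mask)
{
        return __builtin_popcountl(mask);
}

/* Reserve per-CPU space for the CPUs found, or for 32 if none were found. */
static int cpus_to_reserve(unsigned long early_possible_mask)
{
        int weight = mask_weight(early_possible_mask);

        return weight == 0 ? 32 : weight;
}

int main(void)
{
        printf("%d\n", cpus_to_reserve(0x0));   /* no CPUs found -> 32 */
        printf("%d\n", cpus_to_reserve(0xf));   /* 4 CPUs found  -> 4  */
        return 0;
}
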
@@ -690,7 +745,7 @@ get_model_name(__u8 family, __u8 model)
        if (overflow++ == 0)
                printk(KERN_ERR
                       "%s: Table overflow. Some processor model information will be missing\n",
-                      __FUNCTION__);
+                      __func__);
        return "Unknown";
 }
 
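
These hunks replace the GNU-specific __FUNCTION__ with __func__, the C99
predefined identifier, so the messages keep printing the enclosing function's
name without relying on a compiler extension. A minimal illustration of the
standard form:

#include <stdio.h>

static void report_overflow(void)
{
        /* __func__ evaluates to "report_overflow" here (C99 6.4.2.2). */
        fprintf(stderr, "%s: table overflow, some information missing\n",
                __func__);
}

int main(void)
{
        report_overflow();
        return 0;
}
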
@@ -785,7 +840,7 @@ get_max_cacheline_size (void)
         status = ia64_pal_cache_summary(&levels, &unique_caches);
         if (status != 0) {
                 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
-                       __FUNCTION__, status);
+                       __func__, status);
                 max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
@@ -798,7 +853,7 @@ get_max_cacheline_size (void)
                if (status != 0) {
                        printk(KERN_ERR
                               "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
-                              __FUNCTION__, l, status);
+                              __func__, l, status);
                        max = SMP_CACHE_BYTES;
                        /* The safest setup for "flush_icache_range()" */
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
@@ -814,7 +869,7 @@ get_max_cacheline_size (void)
                        if (status != 0) {
                                printk(KERN_ERR
                                "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
-                                       __FUNCTION__, l, status);
+                                       __func__, l, status);
                                /* The safest setup for "flush_icache_range()" */
                                cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        }
@@ -946,9 +1001,10 @@ cpu_init (void)
 #endif
 
        /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
-       if (ia64_pal_vm_summary(NULL, &vmi) == 0)
+       if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
-       else {
+               setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
+       } else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }