Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 407d74a8a5421718114232a2cc55a769d3447b78..a8052b76df41b3c15694493cd7e92df7d9b8d1bb 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -46,8 +46,6 @@
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
 
-extern void calibrate_delay(void);
-
 int sparc64_multi_core __read_mostly;
 
 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
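The hunk above drops a file-local prototype for calibrate_delay(). A per-file extern duplicates what a shared header should declare and can silently drift from the real definition; in current mainline the prototype lives in include/linux/delay.h. A minimal sketch of the pattern, with hypothetical names:

/* widget.h: declare the routine once, in one shared header */
void calibrate_widget(void);

/* user.c: include the header instead of writing a local extern */
#include "widget.h"

void setup(void)
{
	calibrate_widget();	/* call is checked against widget.h */
}
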
@@ -236,8 +234,9 @@ void smp_synchronize_tick_client(void)
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 #endif
 
-       printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
-              "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
+       printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
+              "(last diff %ld cycles, maxerr %lu cycles)\n",
+              smp_processor_id(), delta, rt);
 }
 
 static void smp_start_sync_tick_client(int cpu);
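The printk change above fixes a classic pitfall: the compiler concatenates adjacent string literals with no implicit space, so splitting the format string between "cycles," and "maxerr" printed "...cycles,maxerr...". The patch moves the split point to a whitespace boundary. A standalone illustration (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Before: literals joined with no space at the split point. */
	printf("(last diff %ld cycles,"
	       "maxerr %lu cycles)\n", 10L, 20UL);	/* "cycles,maxerr" */

	/* After: split chosen so the message text survives intact. */
	printf("(last diff %ld cycles, "
	       "maxerr %lu cycles)\n", 10L, 20UL);	/* reads correctly */
	return 0;
}
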
@@ -459,7 +458,7 @@ again:
        }
 }
 
-static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
        u64 pstate;
        int i;
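This hunk, like the matching one at the bottom of the file, swaps GCC's __inline__ extension spelling for the standard inline keyword that kernel style prefers; generated code is unchanged. A before/after contrast:

/* old, GCC-specific spelling */
static __inline__ int add_one_old(int x) { return x + 1; }

/* standard C99 spelling, preferred in kernel code */
static inline int add_one_new(int x) { return x + 1; }
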
@@ -475,7 +474,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
  */
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-       u64 pstate, ver;
+       u64 pstate, ver, busy_mask;
        int nack_busy_id, is_jbus, need_more;
 
        if (cpus_empty(mask))
@@ -507,14 +506,20 @@ retry:
                               "i" (ASI_INTR_W));
 
        nack_busy_id = 0;
+       busy_mask = 0;
        {
                int i;
 
                for_each_cpu_mask(i, mask) {
                        u64 target = (i << 14) | 0x70;
 
-                       if (!is_jbus)
+                       if (is_jbus) {
+                               busy_mask |= (0x1UL << (i * 2));
+                       } else {
                                target |= (nack_busy_id << 24);
+                               busy_mask |= (0x1UL <<
+                                             (nack_busy_id * 2));
+                       }
                        __asm__ __volatile__(
                                "stxa   %%g0, [%0] %1\n\t"
                                "membar #Sync\n\t"
@@ -530,15 +535,16 @@ retry:
 
        /* Now, poll for completion. */
        {
-               u64 dispatch_stat;
+               u64 dispatch_stat, nack_mask;
                long stuck;
 
                stuck = 100000 * nack_busy_id;
+               nack_mask = busy_mask << 1;
                do {
                        __asm__ __volatile__("ldxa      [%%g0] %1, %0"
                                             : "=r" (dispatch_stat)
                                             : "i" (ASI_INTR_DISPATCH_STAT));
-                       if (dispatch_stat == 0UL) {
+                       if (!(dispatch_stat & (busy_mask | nack_mask))) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
                                if (unlikely(need_more)) {
@@ -555,12 +561,12 @@ retry:
                        }
                        if (!--stuck)
                                break;
-               } while (dispatch_stat & 0x5555555555555555UL);
+               } while (dispatch_stat & busy_mask);
 
                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : : "r" (pstate));
 
-               if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
+               if (dispatch_stat & busy_mask) {
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
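With busy_mask in hand, the completion poll no longer assumes every entry in the status register belongs to this call. The old code waited for the whole register to read zero and tested all 32 busy bits with the 0x5555555555555555 constant; the new code masks the read with busy_mask (and nack_mask, the same bits shifted up by one) so only this dispatch's entries are consulted. A busy bit that never clears is treated as a hung dispatch instead of freezing the cpu, while NACKed targets are retried. A simplified model of the loop, with a hypothetical read_dispatch_stat() standing in for the ldxa from ASI_INTR_DISPATCH_STAT:

#include <stdint.h>

enum xcall_result { XCALL_DONE, XCALL_NACKED, XCALL_STUCK };

extern uint64_t read_dispatch_stat(void);	/* hypothetical accessor */

static enum xcall_result poll_dispatch(uint64_t busy_mask, long stuck)
{
	uint64_t nack_mask = busy_mask << 1;	/* NACK sits above BUSY */
	uint64_t stat;

	do {
		stat = read_dispatch_stat();
		if (!(stat & (busy_mask | nack_mask)))
			return XCALL_DONE;	/* all our entries idle */
		if (!--stuck)
			break;			/* give up on stuck BUSY */
	} while (stat & busy_mask);		/* spin only on our bits */

	return (stat & busy_mask) ? XCALL_STUCK : XCALL_NACKED;
}
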
@@ -906,7 +912,7 @@ extern atomic_t dcpage_flushes;
 extern atomic_t dcpage_flushes_xcall;
 #endif
 
-static __inline__ void __local_flush_dcache_page(struct page *page)
+static inline void __local_flush_dcache_page(struct page *page)
 {
 #ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),