#include <asm/page.h>
#include <asm/byteorder.h>
+#include <asm/synch.h>
#include <asm/mmu.h>
#define SIO_CONFIG_RA 0x398
#if defined(CONFIG_4xx)
#include <asm/ibm4xx.h>
+#elif defined(CONFIG_PPC_MPC52xx)
+#include <asm/mpc52xx.h>
#elif defined(CONFIG_8xx)
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
#define outsl(port, buf, nl) _outsl_ns((port)+___IO_BASE, (buf), (nl))
/*
- * On powermacs, we will get a machine check exception if we
- * try to read data from a non-existent I/O port. Because the
- * machine check is an asynchronous exception, it isn't
+ * On powermacs and 8xx we will get a machine check exception
+ * if we try to read data from a non-existent I/O port. Because
+ * the machine check is an asynchronous exception, it isn't
* well-defined which instruction SRR0 will point to when the
* exception occurs.
* With the sequence below (twi; isync; nop), we have found that
{ \
unsigned int x; \
__asm__ __volatile__( \
- op " %0,0,%1\n" \
+ "0:" op " %0,0,%1\n" \
"1: twi 0,%0,0\n" \
"2: isync\n" \
"3: nop\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 2\n" \
+ " .long 0b,5b\n" \
" .long 1b,5b\n" \
" .long 2b,5b\n" \
" .long 3b,5b\n" \
extern __inline__ void name(unsigned int val, unsigned int port) \
{ \
__asm__ __volatile__( \
- op " %0,0,%1\n" \
+ "0:" op " %0,0,%1\n" \
"1: sync\n" \
"2:\n" \
".section __ex_table,\"a\"\n" \
" .align 2\n" \
+ " .long 0b,2b\n" \
" .long 1b,2b\n" \
".previous" \
: : "r" (val), "r" (port + ___IO_BASE)); \
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)
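page_to_phys() gives the address the CPU uses for the page; page_to_bus() shifts it by PCI_DRAM_OFFSET to what a PCI bus master must be programmed with, on platforms where system RAM does not appear at bus address 0. A minimal sketch (the register block and its 0x10 offset are invented):

/* Sketch: hand a hypothetical DMA engine the bus-side address of a
 * page-sized buffer. */
static void my_start_dma(unsigned long regs, struct page *page)
{
	unsigned long bus = page_to_bus(page);	/* the device's view of RAM */

	writel(bus, regs + 0x10);	/* hypothetical DMA address register */
}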
-/*
- * Enforce In-order Execution of I/O:
- * Acts as a barrier to ensure all previous I/O accesses have
- * completed before any further ones are issued.
- */
-extern inline void eieio(void)
-{
- __asm__ __volatile__ ("eieio" : : : "memory");
-}
-
/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three
 * iobarrier variants (rw/r/w).
 */
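A sketch of the ordering problem eieio solves, using the iobarrier_w() variant named above and an invented register layout:

/* Sketch: eieio keeps the device from seeing the GO store before the
 * address store; without it the CPU may reorder the two MMIO writes. */
static void my_kick_device(volatile u32 *regs, u32 dma_addr)
{
	regs[0] = dma_addr;	/* hypothetical address register */
	iobarrier_w();		/* expands to eieio on PPC */
	regs[1] = 1;		/* hypothetical GO register */
}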
#include <asm/mpc8260_pci9.h>
#endif
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+#define dma_cache_inv(_start,_size) \
+ invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+ clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+ flush_dcache_range(_start, (_start + _size))
+
+#else
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+#endif
+
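These macros only do real work when the platform's caches are not DMA-coherent; elsewhere they compile away, so drivers can call them unconditionally. A sketch of the intended call pattern (function names invented):

/* Sketch: keep CPU caches and DMA buffers in step on non-coherent
 * parts; both helpers become no-ops on coherent ones. */
static void my_dma_to_device(void *buf, size_t len)
{
	/* CPU filled buf: push dirty lines to RAM before the device reads. */
	dma_cache_wback((unsigned long)buf, len);
}

static void my_dma_from_device(void *buf, size_t len)
{
	/* Device will fill buf: drop stale CPU lines before it writes. */
	dma_cache_inv((unsigned long)buf, len);
}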
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access