Merge branch 'orion-fixes2'
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index ead68f8eb883cd254c8801ce59a61640e1f5d2c8..6aff126fc07ea31802ec249613e9237c16472875 100644
 #include <asm/ptrace.h>
 #include <asm/ustack.h>
 
+#define IA64_NUM_PHYS_STACK_REG        96
 #define IA64_NUM_DBG_REGS      8
-/*
- * Limits for PMC and PMD are set to less than maximum architected values
- * but should be sufficient for a while
- */
-#define IA64_NUM_PMC_REGS      64
-#define IA64_NUM_PMD_REGS      64
 
 #define DEFAULT_MAP_BASE       __IA64_UL_CONST(0x2000000000000000)
 #define DEFAULT_TASK_SIZE      __IA64_UL_CONST(0xa000000000000000)
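The first hunk drops the per-thread PMC/PMD limits (the register images are kept inside the perfmon context from now on, see the thread_struct hunk further down) and adds a constant naming the 96 physical stacked registers (r32-r127). A hypothetical sketch of how such a constant might be consumed; the structure and field names below are made up:

struct stacked_regs_snapshot {
	unsigned long gr[IA64_NUM_PHYS_STACK_REG];			/* physical stacked GRs */
	unsigned long nat[(IA64_NUM_PHYS_STACK_REG + 63) / 64];	/* one NaT bit per GR */
};
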
@@ -36,7 +31,8 @@
  * each (assuming 8KB page size), for a total of 8TB of user virtual
  * address space.
  */
-#define TASK_SIZE              (current->thread.task_size)
+#define TASK_SIZE_OF(tsk)      ((tsk)->thread.task_size)
+#define TASK_SIZE              TASK_SIZE_OF(current)
 
 /*
  * This decides where the kernel will search for a free chunk of vm
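TASK_SIZE was previously defined only in terms of current; the new TASK_SIZE_OF(tsk) form lets generic code ask the same question about an arbitrary task, with TASK_SIZE kept as the current-task shorthand. A minimal sketch of the kind of caller this enables (the helper name is made up):

static inline int ia64_addr_in_user_space(struct task_struct *tsk, unsigned long addr)
{
	/* ia64 keeps the per-task user VA limit in thread.task_size */
	return addr < TASK_SIZE_OF(tsk);
}
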
@@ -123,6 +119,69 @@ struct ia64_psr {
        __u64 reserved4 : 19;
 };
 
+union ia64_isr {
+       __u64  val;
+       struct {
+               __u64 code : 16;
+               __u64 vector : 8;
+               __u64 reserved1 : 8;
+               __u64 x : 1;
+               __u64 w : 1;
+               __u64 r : 1;
+               __u64 na : 1;
+               __u64 sp : 1;
+               __u64 rs : 1;
+               __u64 ir : 1;
+               __u64 ni : 1;
+               __u64 so : 1;
+               __u64 ei : 2;
+               __u64 ed : 1;
+               __u64 reserved2 : 20;
+       };
+};
+
+union ia64_lid {
+       __u64 val;
+       struct {
+               __u64  rv  : 16;
+               __u64  eid : 8;
+               __u64  id  : 8;
+               __u64  ig  : 32;
+       };
+};
+
+union ia64_tpr {
+       __u64 val;
+       struct {
+               __u64 ig0 : 4;
+               __u64 mic : 4;
+               __u64 rsv : 8;
+               __u64 mmi : 1;
+               __u64 ig1 : 47;
+       };
+};
+
+union ia64_itir {
+       __u64 val;
+       struct {
+               __u64 rv3  :  2; /* 0-1 */
+               __u64 ps   :  6; /* 2-7 */
+               __u64 key  : 24; /* 8-31 */
+               __u64 rv4  : 32; /* 32-63 */
+       };
+};
+
+union  ia64_rr {
+       __u64 val;
+       struct {
+               __u64  ve       :  1;  /* enable hw walker */
+               __u64  reserved0:  1;  /* reserved */
+               __u64  ps       :  6;  /* log page size */
+               __u64  rid      : 24;  /* region id */
+               __u64  reserved1: 32;  /* reserved */
+       };
+};
+
 /*
  * CPU type, hardware bug flags, and per-CPU state.  Frequently used
  * state comes earlier:
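The new unions (ia64_isr, ia64_lid, ia64_tpr, ia64_itir, ia64_rr) give named bitfield views onto raw 64-bit register images, so callers can decode them without open-coding shifts and masks. A minimal sketch of the idiom, using ia64_rr and a made-up helper name:

static inline unsigned long ia64_rr_rid(__u64 raw)
{
	union ia64_rr rr;

	rr.val = raw;	/* overlay the raw 64-bit region register value */
	return rr.rid;	/* region id, bits 8-31 */
}

The same val-plus-bitfield pattern applies to the other unions, e.g. isr.code or tpr.mic.
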
@@ -216,7 +275,7 @@ struct desc_struct {
        unsigned int a, b;
 };
 
-#define desc_empty(desc)               (!((desc)->a + (desc)->b))
+#define desc_empty(desc)               (!((desc)->a | (desc)->b))
 #define desc_equal(desc1, desc2)       (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
 
 #define GDT_ENTRY_TLS_ENTRIES  3
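The desc_empty() change from '+' to '|' is a correctness fix rather than a cleanup: with addition, two non-zero 32-bit halves can wrap around to zero and make a live descriptor look empty. A small illustration with made-up descriptor values:

static int desc_empty_example(void)
{
	struct desc_struct d = { .a = 0x00000001U, .b = 0xffffffffU };

	/* old: !(d.a + d.b) -> the sum wraps to 0      -> "empty" (wrong)  */
	/* new: !(d.a | d.b) -> 0xffffffff is non-zero  -> "not empty" (ok) */
	return desc_empty(&d);
}
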
@@ -225,7 +284,7 @@ struct desc_struct {
 
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
-struct partial_page_list;
+struct ia64_partial_page_list;
 #endif
 
 struct thread_struct {
@@ -247,7 +306,7 @@ struct thread_struct {
        __u64 fdr;                      /* IA32 fp except. data reg */
        __u64 old_k1;                   /* old value of ar.k1 */
        __u64 old_iob;                  /* old IOBase value */
-       struct partial_page_list *ppl;  /* partial page list for 4K page size issue */
+       struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
         /* cached TLS descriptors. */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
 
@@ -263,13 +322,9 @@ struct thread_struct {
 # define INIT_THREAD_IA32
 #endif /* CONFIG_IA32_SUPPORT */
 #ifdef CONFIG_PERFMON
-       __u64 pmcs[IA64_NUM_PMC_REGS];
-       __u64 pmds[IA64_NUM_PMD_REGS];
        void *pfm_context;                   /* pointer to detailed PMU context */
        unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
-# define INIT_THREAD_PM                .pmcs =                 {0UL, },  \
-                               .pmds =                 {0UL, },  \
-                               .pfm_context =          NULL,     \
+# define INIT_THREAD_PM                .pfm_context =          NULL,     \
                                .pfm_needs_checking =   0UL,
 #else
 # define INIT_THREAD_PM
@@ -304,9 +359,9 @@ struct thread_struct {
        regs->ar_bspstore = current->thread.rbs_bot;                                            \
        regs->ar_fpsr = FPSR_DEFAULT;                                                           \
        regs->loadrs = 0;                                                                       \
-       regs->r8 = current->mm->dumpable;       /* set "don't zap registers" flag */            \
+       regs->r8 = get_dumpable(current->mm);   /* set "don't zap registers" flag */            \
        regs->r12 = new_sp - 16;        /* allocate 16 byte scratch area */                     \
-       if (unlikely(!current->mm->dumpable)) {                                                 \
+       if (unlikely(!get_dumpable(current->mm))) {                                                     \
                /*                                                                              \
                 * Zap scratch regs to avoid leaking bits between processes with different      \
                 * uid/privileges.                                                              \
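Reading current->mm->dumpable directly is replaced by the get_dumpable() accessor, following the core-kernel change that turned the dumpable field into flag bits inside mm->flags. Only the fully non-dumpable case triggers the register scrubbing here; a sketch with a hypothetical helper name, assuming the usual 0/1/2 return values (no dump, dump, root-only dump):

static inline int ia64_should_zap_scratch_regs(struct mm_struct *mm)
{
	/* scrub scratch registers only when the mm is not dumpable at all */
	return get_dumpable(mm) == 0;
}
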
@@ -481,7 +536,7 @@ ia64_set_psr (__u64 psr)
 {
        ia64_stop();
        ia64_setreg(_IA64_REG_PSR_L, psr);
-       ia64_srlz_d();
+       ia64_srlz_i();
 }
 
 /*