#ifndef __ASM_X86_MSR_H_
#define __ASM_X86_MSR_H_

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
# include <linux/types.h>
#endif

#ifdef __KERNEL__
#ifndef __ASSEMBLY__
static inline unsigned long long native_read_tscp(int *aux)
{
        unsigned long low, high;
        /* .byte 0x0f,0x01,0xf9 is the RDTSCP opcode (for older assemblers) */
        asm volatile (".byte 0x0f,0x01,0xf9"
                      : "=a" (low), "=d" (high), "=c" (*aux));
        return low | ((u64)high << 32);
}

#define rdtscp(low, high, aux)                                          \
       do {                                                            \
                unsigned long long _val = native_read_tscp(&(aux));     \
                (low) = (u32)_val;                                      \
                (high) = (u32)(_val >> 32);                             \
       } while (0)

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
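
/*
 * Usage sketch (hypothetical caller, not part of this header): rdtscp()
 * fills its arguments with the low/high halves of the TSC plus whatever
 * value the kernel programmed into TSC_AUX (typically an encoded CPU/node
 * number); rdtscpll() keeps the TSC as a single 64-bit value:
 *
 *	u32 lo, hi;
 *	int aux;
 *	rdtscp(lo, hi, aux);
 */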
#endif
#endif

#ifdef __i386__

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <asm/errno.h>

static inline unsigned long long native_read_msr(unsigned int msr)
{
        unsigned long long val;

        asm volatile("rdmsr" : "=A" (val) : "c" (msr));
        return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                      int *err)
{
        unsigned long long val;

        asm volatile("2: rdmsr ; xorl %0,%0\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3:  movl %3,%0 ; jmp 1b\n\t"
                     ".previous\n\t"
                     ".section __ex_table,\"a\"\n"
                     "   .align 4\n\t"
                     "   .long  2b,3b\n\t"
                     ".previous"
                     : "=r" (*err), "=A" (val)
                     : "c" (msr), "i" (-EFAULT));

        return val;
}

static inline void native_write_msr(unsigned int msr, unsigned long long val)
{
        asm volatile("wrmsr" : : "c" (msr), "A"(val));
}

static inline int native_write_msr_safe(unsigned int msr,
                                        unsigned long long val)
{
        int err;
        asm volatile("2: wrmsr ; xorl %0,%0\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3:  movl %4,%0 ; jmp 1b\n\t"
                     ".previous\n\t"
                     ".section __ex_table,\"a\"\n"
                     "   .align 4\n\t"
                     "   .long  2b,3b\n\t"
                     ".previous"
                     : "=a" (err)
                     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
                       "i" (-EFAULT));
        return err;
}

static inline unsigned long long native_read_tsc(void)
{
        unsigned long long val;
        asm volatile("rdtsc" : "=A" (val));
        return val;
}
static inline unsigned long long native_read_pmc(int counter)
{
        unsigned long long val;
        asm volatile("rdpmc" : "=A" (val) : "c" (counter));
        return val;
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr,val1,val2)                                            \
        do {                                                            \
                u64 __val = native_read_msr(msr);                       \
                (val1) = (u32)__val;                                    \
                (val2) = (u32)(__val >> 32);                            \
        } while(0)

static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
{
        native_write_msr(__msr, ((u64)__high << 32) | __low);
}

#define rdmsrl(msr,val)                                                 \
        ((val) = native_read_msr(msr))

#define wrmsrl(msr,val) native_write_msr(msr, val)
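
/*
 * Usage sketch (hypothetical caller, not part of this header), assuming an
 * MSR constant from <asm/msr-index.h>; the rd* forms write straight into
 * their arguments rather than taking pointers:
 *
 *	u32 lo, hi;
 *	u64 val;
 *	rdmsr(MSR_IA32_APICBASE, lo, hi);
 *	rdmsrl(MSR_IA32_APICBASE, val);
 *	wrmsrl(MSR_IA32_APICBASE, val);
 */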

/* wrmsr with exception handling */
static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
{
        return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr,p1,p2)                                           \
        ({                                                              \
                int __err;                                              \
                u64 __val = native_read_msr_safe(msr, &__err);          \
                (*p1) = (u32)__val;                                     \
                (*p2) = (u32)(__val >> 32);                             \
                __err;                                                  \
        })
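
/*
 * Usage sketch (hypothetical caller, not part of this header): the _safe
 * variants return 0 on success and -EFAULT if the access faults, so probing
 * a possibly-absent MSR might look like:
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(msr, &lo, &hi))
 *		return -EIO;		(MSR not implemented on this CPU)
 */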

#define rdtscl(low)                                             \
        ((low) = (u32)native_read_tsc())

#define rdtscll(val)                                            \
        ((val) = native_read_tsc())

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)	/* 0x10 is MSR_IA32_TSC */

#define rdpmc(counter,low,high)                                 \
        do {                                                    \
                u64 _l = native_read_pmc(counter);              \
                (low)  = (u32)_l;                               \
                (high) = (u32)(_l >> 32);                       \
        } while(0)
#endif  /* !CONFIG_PARAVIRT */

#endif  /* ! __ASSEMBLY__ */
#endif  /* __KERNEL__ */

#else   /* __i386__ */

#ifndef __ASSEMBLY__
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr,val1,val2) \
       __asm__ __volatile__("rdmsr" \
                            : "=a" (val1), "=d" (val2) \
                            : "c" (msr))


#define rdmsrl(msr,val) do { unsigned long a__,b__; \
       __asm__ __volatile__("rdmsr" \
                            : "=a" (a__), "=d" (b__) \
                            : "c" (msr)); \
       val = a__ | (b__<<32); \
} while(0)

#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
                          : /* no outputs */ \
                          : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)

#define rdtsc(low,high) \
     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")


#define rdtscll(val) do { \
     unsigned int __a,__d; \
     __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)	/* 0x10 is MSR_IA32_TSC */

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)	/* MSR_TSC_AUX */

#define rdpmc(counter,low,high) \
     __asm__ __volatile__("rdpmc" \
                          : "=a" (low), "=d" (high) \
                          : "c" (counter))


#ifdef __KERNEL__

/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__;                       \
        asm volatile("2: wrmsr ; xorl %0,%0\n"                  \
                     "1:\n\t"                                   \
                     ".section .fixup,\"ax\"\n\t"               \
                     "3:  movl %4,%0 ; jmp 1b\n\t"              \
                     ".previous\n\t"                            \
                     ".section __ex_table,\"a\"\n"              \
                     "   .align 8\n\t"                          \
                     "   .quad  2b,3b\n\t"                      \
                     ".previous"                                \
                     : "=a" (ret__)                             \
                     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
        ret__; })

#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))

#define rdmsr_safe(msr,a,b) \
        ({ int ret__;                                           \
          asm volatile ("1:       rdmsr\n"                      \
                        "2:\n"                                  \
                        ".section .fixup,\"ax\"\n"              \
                        "3:       movl %4,%0\n"                 \
                        " jmp 2b\n"                             \
                        ".previous\n"                           \
                        ".section __ex_table,\"a\"\n"           \
                        " .align 8\n"                           \
                        " .quad 1b,3b\n"                                \
                        ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
                        :"c"(msr), "i"(-EIO), "0"(0));                  \
          ret__; })
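
/*
 * Usage sketch (hypothetical caller, not part of this header): on x86-64
 * the _safe forms likewise return 0 on success; rdmsr_safe() stores through
 * its pointer arguments and returns -EIO on a fault, while checking_wrmsrl()
 * wraps wrmsr_safe() for a single 64-bit value:
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(msr, &lo, &hi) || checking_wrmsrl(msr, new_val))
 *		return -EIO;
 */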

#endif  /* __KERNEL__ */
#endif  /* __ASSEMBLY__ */

#endif  /* !__i386__ */

#ifndef __ASSEMBLY__
#ifdef __KERNEL__

#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else  /*  CONFIG_SMP  */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        return wrmsr_safe(msr_no, l, h);
}
#endif  /* CONFIG_SMP */
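
/*
 * Usage sketch (hypothetical caller, not part of this header): the *_on_cpu
 * helpers perform the access on a specific CPU (out of line on SMP, typically
 * via a cross-CPU call; the UP stubs above just run locally):
 *
 *	u32 lo, hi;
 *	rdmsr_on_cpu(1, MSR_IA32_APICBASE, &lo, &hi);
 */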
#endif  /* __KERNEL__ */
#endif  /* __ASSEMBLY__ */

#endif