/*
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise event-based sampling (PEBS).
 *
 * It manages:
 * - per-thread and per-cpu allocation of BTS and PEBS
 * - buffer memory allocation (optional)
 * - buffer overflow handling
 *
 * It assumes:
 * - get_task_struct on all parameter tasks
 * - current is allowed to trace parameter tasks
 *
 * Copyright (C) 2007-2008 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
 */
#include <asm/ds.h>

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>

#ifdef CONFIG_X86_DS
/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
	/* the size of the DS structure in bytes */
	unsigned char sizeof_ds;
	/* the size of one pointer-typed field in the DS structure in
	 * bytes; this covers the first 8 fields related to buffer
	 * management. */
	unsigned char sizeof_field;
	/* the size of a BTS/PEBS record in bytes */
	unsigned char sizeof_rec[2];
};
static struct ds_configuration ds_cfg;
/*
 * Debug Store (DS) save area configuration (see the Intel 64 and IA-32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into the BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into the PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which the counter is reset following counter overflow
 *
 * Later architectures use 64-bit pointers throughout, whereas earlier
 * architectures use 32-bit pointers in 32-bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32-bit processors, we only access the lower 32 bits of the
 * 64-bit pointer fields. The upper halves will be zeroed out.
 */
enum ds_field {
	ds_buffer_base = 0,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

enum ds_qualifier {
	ds_bts = 0,
	ds_pebs
};
static inline unsigned long ds_get(const unsigned char *base,
				   enum ds_qualifier qual, enum ds_field field)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	return *(unsigned long *)base;
}

static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
			  enum ds_field field, unsigned long value)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	(*(unsigned long *)base) = value;
}
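
/*
 * Example (illustrative sketch, not used by the code below): with a
 * 64-bit configuration (sizeof_field == 8), the PEBS write pointer is
 * field ds_index of the PEBS region, i.e. at byte offset
 * 8 * (ds_index + 4 * ds_pebs) == 8 * 5 == 40 of the DS save area.
 */
static inline unsigned long ds_example_pebs_index(const unsigned char *ds)
{
	/* equivalent to *(unsigned long *)(ds + 40) on 64-bit */
	return ds_get(ds, ds_pebs, ds_index);
}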
/*
 * Locking is done only for allocating BTS or PEBS resources and for
 * guarding context and buffer memory allocation.
 *
 * Most functions require the current task to own the ds context part
 * they are going to access. All the locking is done when validating
 * access to the context.
 */
static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
/*
 * Validate that the current task is allowed to access the BTS/PEBS
 * buffer of the parameter task.
 *
 * Returns 0, if access is granted; -errno, otherwise.
 */
static inline int ds_validate_access(struct ds_context *context,
				     enum ds_qualifier qual)
{
	if (!context)
		return -EPERM;

	if (context->owner[qual] == current)
		return 0;

	return -EPERM;
}
/*
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *
 *   >0  number of per-thread tracers
 *   <0  number of per-cpu tracers
 *
 * The below functions to get and put tracers and to check the
 * allocation type require the ds_lock to be held by the caller.
 *
 * The tracers count essentially gives the number of ds contexts for a
 * certain type of allocation.
 */
static long tracers;

static inline void get_tracer(struct task_struct *task)
{
	tracers += (task ? 1 : -1);
}

static inline void put_tracer(struct task_struct *task)
{
	tracers -= (task ? 1 : -1);
}

static inline int check_tracer(struct task_struct *task)
{
	return (task ? (tracers >= 0) : (tracers <= 0));
}
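
/*
 * Example (sketch of the caller pattern described above; the function
 * itself is hypothetical): check the allocation type and take a
 * tracer reference, with ds_lock held as required.
 */
static inline int ds_example_take_tracer(struct task_struct *task)
{
	int error = 0;

	spin_lock(&ds_lock);
	if (!check_tracer(task))
		/* would mix per-thread and per-cpu tracing */
		error = -EPERM;
	else
		get_tracer(task);
	spin_unlock(&ds_lock);

	return error;
}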
/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 *
 * We distinguish between an allocating and a non-allocating get of a
 * context:
 * - the allocating get is used for requesting BTS/PEBS resources. It
 *   requires the caller to hold the global ds_lock.
 * - the non-allocating get is used for all other cases. A
 *   non-existing context indicates an error. It acquires and releases
 *   the ds_lock itself for obtaining the context.
 *
 * A context and its DS configuration are allocated and deallocated
 * together. A context always has a DS configuration of the
 * appropriate size.
 */
static DEFINE_PER_CPU(struct ds_context *, system_context);

#define this_system_context per_cpu(system_context, smp_processor_id())
/*
 * Returns the pointer to the parameter task's context or to the
 * system-wide context, if task is NULL.
 *
 * Increases the use count of the returned context, if not NULL.
 */
static inline struct ds_context *ds_get_context(struct task_struct *task)
{
	struct ds_context *context;

	spin_lock(&ds_lock);

	context = (task ? task->thread.ds_ctx : this_system_context);
	if (context)
		context->count++;

	spin_unlock(&ds_lock);

	return context;
}
/*
 * Same as ds_get_context, but allocates the context and its DS
 * structure, if necessary; returns NULL, if out of memory.
 *
 * pre: requires ds_lock to be held
 */
static inline struct ds_context *ds_alloc_context(struct task_struct *task)
{
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &this_system_context);
	struct ds_context *context = *p_context;

	if (!context) {
		context = kzalloc(sizeof(*context), GFP_KERNEL);
		if (!context)
			return NULL;

		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
		if (!context->ds) {
			kfree(context);
			return NULL;
		}

		*p_context = context;

		context->this = p_context;
		context->task = task;

		if (task)
			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);

		if (!task || (task == current))
			wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);

		get_tracer(task);
	}

	context->count++;

	return context;
}
/*
 * Decreases the use count of the parameter context, if not NULL.
 * Deallocates the context, if the use count reaches zero.
 */
static inline void ds_put_context(struct ds_context *context)
{
	if (!context)
		return;

	spin_lock(&ds_lock);

	if (--context->count)
		goto out;

	*(context->this) = NULL;

	if (context->task)
		clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);

	if (!context->task || (context->task == current))
		wrmsrl(MSR_IA32_DS_AREA, 0);

	put_tracer(context->task);

	/* free any leftover buffers from tracers that did not
	 * deallocate them properly. */
	kfree(context->buffer[ds_bts]);
	kfree(context->buffer[ds_pebs]);
	kfree(context->ds);
	kfree(context);
 out:
	spin_unlock(&ds_lock);
}
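
/*
 * Example (sketch): the non-allocating get/validate/put pattern used
 * by the accessor functions below; the actual work in the middle is
 * left blank here.
 */
static inline int ds_example_with_context(struct task_struct *task,
					  enum ds_qualifier qual)
{
	struct ds_context *context;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	/* ... access context->ds under validated ownership ... */
	error = 0;
 out:
	ds_put_context(context);
	return error;
}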
/*
 * Handle a buffer overflow
 *
 * task: the task whose buffers are overflowing;
 *       NULL for a buffer overflow on the current cpu
 * context: the ds context
 * qual: the buffer type
 */
static void ds_overflow(struct task_struct *task, struct ds_context *context,
			enum ds_qualifier qual)
{
	if (context->callback[qual])
		(*context->callback[qual])(task);

	/* todo: do some more overflow handling */
}
/*
 * Allocate a non-pageable buffer of the parameter size.
 * Checks the memory and the locked memory rlimit.
 *
 * Returns the buffer, if successful;
 *         NULL, if out of memory or rlimit exceeded.
 *
 * size: the requested buffer size in bytes
 * pages (out): if not NULL, contains the number of pages reserved
 */
static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
{
	unsigned long rlim, vm, pgsz;
	void *buffer;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->total_vm + pgsz;
	if (rlim < vm)
		return NULL;

	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->locked_vm + pgsz;
	if (rlim < vm)
		return NULL;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		return NULL;

	current->mm->total_vm  += pgsz;
	current->mm->locked_vm += pgsz;

	if (pages)
		*pages = pgsz;

	return buffer;
}
static int ds_request(struct task_struct *task, void *base, size_t size,
		      ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long buffer, adj;
	const unsigned long alignment = (1 << 3);
	int error = 0;

	if (!ds_cfg.sizeof_ds)
		return -EOPNOTSUPP;

	/* we require some space to do alignment adjustments below */
	if (size < (alignment + ds_cfg.sizeof_rec[qual]))
		return -EINVAL;

	/* buffer overflow notification is not yet implemented */
	if (ovfl)
		return -EOPNOTSUPP;

	spin_lock(&ds_lock);

	if (!check_tracer(task)) {
		spin_unlock(&ds_lock);
		return -EPERM;
	}

	error = -ENOMEM;
	context = ds_alloc_context(task);
	if (!context)
		goto out_unlock;

	error = -EALREADY;
	if (context->owner[qual] == current)
		goto out_unlock;
	error = -EPERM;
	if (context->owner[qual] != NULL)
		goto out_unlock;
	context->owner[qual] = current;

	spin_unlock(&ds_lock);

	error = -ENOMEM;
	if (!base) {
		base = ds_allocate_buffer(size, &context->pages[qual]);
		if (!base)
			goto out_release;

		context->buffer[qual] = base;
	}
	error = 0;

	context->callback[qual] = ovfl;

	/* adjust the buffer address and size to meet alignment
	 * constraints:
	 * - buffer is double-word aligned
	 * - size is multiple of record size
	 *
	 * We checked the size at the very beginning; we have enough
	 * space to do the adjustment.
	 */
	buffer = (unsigned long)base;

	adj = ALIGN(buffer, alignment) - buffer;
	buffer += adj;
	size   -= adj;

	size /= ds_cfg.sizeof_rec[qual];
	size *= ds_cfg.sizeof_rec[qual];

	ds_set(context->ds, qual, ds_buffer_base, buffer);
	ds_set(context->ds, qual, ds_index, buffer);
	ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);

	/* todo: select a suitable interrupt threshold once overflow
	 * notification is implemented; until then, place the
	 * threshold one byte beyond the end so it never triggers */
	ds_set(context->ds, qual,
	       ds_interrupt_threshold, buffer + size + 1);

	/* we keep the context until ds_release */
	return error;

 out_release:
	context->owner[qual] = NULL;
	ds_put_context(context);
	return error;

 out_unlock:
	spin_unlock(&ds_lock);
	ds_put_context(context);
	return error;
}
int ds_request_bts(struct task_struct *task, void *base, size_t size,
		   ds_ovfl_callback_t ovfl)
{
	return ds_request(task, base, size, ovfl, ds_bts);
}

int ds_request_pebs(struct task_struct *task, void *base, size_t size,
		    ds_ovfl_callback_t ovfl)
{
	return ds_request(task, base, size, ovfl, ds_pebs);
}
static int ds_release(struct task_struct *task, enum ds_qualifier qual)
{
	struct ds_context *context;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	kfree(context->buffer[qual]);
	context->buffer[qual] = NULL;

	current->mm->total_vm  -= context->pages[qual];
	current->mm->locked_vm -= context->pages[qual];
	context->pages[qual] = 0;
	context->owner[qual] = NULL;

	/*
	 * we put the context twice:
	 * once for the ds_get_context
	 * once for the corresponding ds_request
	 */
	ds_put_context(context);
 out:
	ds_put_context(context);
	return error;
}

int ds_release_bts(struct task_struct *task)
{
	return ds_release(task, ds_bts);
}

int ds_release_pebs(struct task_struct *task)
{
	return ds_release(task, ds_pebs);
}
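
/*
 * Example (hypothetical caller, not part of this interface): have the
 * kernel allocate a BTS buffer for the current thread, then release
 * it. A NULL base requests kernel allocation; a NULL ovfl callback is
 * required as long as overflow notification is not implemented.
 */
static inline int ds_example_trace_current(void)
{
	int error;

	/* 4096 bytes is an arbitrary example size */
	error = ds_request_bts(current, /* base = */ NULL, 4096,
			       /* ovfl = */ NULL);
	if (error < 0)
		return error;

	/* ... let the hardware trace, or write records manually ... */

	return ds_release_bts(current);
}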
static int ds_get_index(struct task_struct *task, size_t *pos,
			enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long base, index;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base  = ds_get(context->ds, qual, ds_buffer_base);
	index = ds_get(context->ds, qual, ds_index);

	error = ((index - base) / ds_cfg.sizeof_rec[qual]);
	if (pos)
		*pos = error;
 out:
	ds_put_context(context);
	return error;
}

int ds_get_bts_index(struct task_struct *task, size_t *pos)
{
	return ds_get_index(task, pos, ds_bts);
}

int ds_get_pebs_index(struct task_struct *task, size_t *pos)
{
	return ds_get_index(task, pos, ds_pebs);
}
static int ds_get_end(struct task_struct *task, size_t *pos,
		      enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long base, end;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base = ds_get(context->ds, qual, ds_buffer_base);
	end  = ds_get(context->ds, qual, ds_absolute_maximum);

	error = ((end - base) / ds_cfg.sizeof_rec[qual]);
	if (pos)
		*pos = error;
 out:
	ds_put_context(context);
	return error;
}

int ds_get_bts_end(struct task_struct *task, size_t *pos)
{
	return ds_get_end(task, pos, ds_bts);
}

int ds_get_pebs_end(struct task_struct *task, size_t *pos)
{
	return ds_get_end(task, pos, ds_pebs);
}
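
/*
 * Example (hypothetical helper, not part of the original interface):
 * report how many BTS records are currently filled and how many fit
 * in total, using the two accessors above.
 */
static inline int ds_example_bts_usage(size_t *filled, size_t *total)
{
	int error = ds_get_bts_index(current, filled);

	if (error < 0)
		return error;

	return ds_get_bts_end(current, total);
}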
static int ds_access(struct task_struct *task, size_t index,
		     const void **record, enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long base, idx;
	int error;

	if (!record)
		return -EINVAL;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base = ds_get(context->ds, qual, ds_buffer_base);
	idx = base + (index * ds_cfg.sizeof_rec[qual]);

	error = -EINVAL;
	/* the record must fit entirely within the buffer */
	if ((idx + ds_cfg.sizeof_rec[qual]) >
	    ds_get(context->ds, qual, ds_absolute_maximum))
		goto out;

	*record = (const void *)idx;
	error = ds_cfg.sizeof_rec[qual];
 out:
	ds_put_context(context);
	return error;
}

int ds_access_bts(struct task_struct *task, size_t index, const void **record)
{
	return ds_access(task, index, record, ds_bts);
}

int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
{
	return ds_access(task, index, record, ds_pebs);
}
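
/*
 * Example (hypothetical helper): visit every BTS record currently in
 * the buffer of the current thread, using the index and access
 * functions above.
 */
static inline int ds_example_for_each_bts(void (*handle)(const void *))
{
	const void *record;
	size_t idx, count;
	int error;

	error = ds_get_bts_index(current, &count);
	if (error < 0)
		return error;

	for (idx = 0; idx < count; idx++) {
		error = ds_access_bts(current, idx, &record);
		if (error < 0)
			return error;
		handle(record);
	}

	return 0;
}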
static int ds_write(struct task_struct *task, const void *record, size_t size,
		    enum ds_qualifier qual, int force)
{
	struct ds_context *context;
	int error;

	if (!record)
		return -EINVAL;

	error = -EPERM;
	context = ds_get_context(task);
	if (!context)
		goto out;

	if (!force) {
		error = ds_validate_access(context, qual);
		if (error < 0)
			goto out;
	}

	error = 0;
	while (size) {
		unsigned long base, index, end, write_end, int_th;
		unsigned long write_size, adj_write_size;

		/*
		 * write as much as possible without producing an
		 * overflow interrupt.
		 *
		 * interrupt_threshold must either be
		 * - bigger than absolute_maximum or
		 * - point to a record between buffer_base and absolute_maximum
		 *
		 * index points to a valid record.
		 */
		base   = ds_get(context->ds, qual, ds_buffer_base);
		index  = ds_get(context->ds, qual, ds_index);
		end    = ds_get(context->ds, qual, ds_absolute_maximum);
		int_th = ds_get(context->ds, qual, ds_interrupt_threshold);

		write_end = min(end, int_th);

		/* if we are already beyond the interrupt threshold,
		 * we fill the entire buffer */
		if (write_end <= index)
			write_end = end;

		if (write_end <= index)
			break;

		write_size = min((unsigned long) size, write_end - index);
		memcpy((void *)index, record, write_size);

		record = (const char *)record + write_size;
		size  -= write_size;
		error += write_size;

		/* round up to full records; the memset below zeroes
		 * the trailing bytes of a partial last record */
		adj_write_size = DIV_ROUND_UP(write_size,
					      ds_cfg.sizeof_rec[qual]);
		adj_write_size *= ds_cfg.sizeof_rec[qual];

		/* zero out trailing bytes */
		memset((char *)index + write_size, 0,
		       adj_write_size - write_size);
		index += adj_write_size;

		if (index >= end)
			index = base;
		ds_set(context->ds, qual, ds_index, index);

		if (index >= int_th)
			ds_overflow(task, context, qual);
	}

 out:
	ds_put_context(context);
	return error;
}
int ds_write_bts(struct task_struct *task, const void *record, size_t size)
{
	return ds_write(task, record, size, ds_bts, /* force = */ 0);
}

int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
{
	return ds_write(task, record, size, ds_pebs, /* force = */ 0);
}

int ds_unchecked_write_bts(struct task_struct *task,
			   const void *record, size_t size)
{
	return ds_write(task, record, size, ds_bts, /* force = */ 1);
}

int ds_unchecked_write_pebs(struct task_struct *task,
			    const void *record, size_t size)
{
	return ds_write(task, record, size, ds_pebs, /* force = */ 1);
}
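
/*
 * Example (sketch; assumes a configuration where a BTS record is
 * three long-sized fields, as in ds_cfg_var below): append one
 * from/to branch record for the current thread. The from/to/flags
 * field layout is illustrative.
 */
static inline int ds_example_append_bts(unsigned long from, unsigned long to)
{
	unsigned long bts_record[3] = { from, to, /* flags = */ 0 };

	return ds_write_bts(current, bts_record, sizeof(bts_record));
}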
static int ds_reset_or_clear(struct task_struct *task,
			     enum ds_qualifier qual, int clear)
{
	struct ds_context *context;
	unsigned long base, end;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base = ds_get(context->ds, qual, ds_buffer_base);
	end  = ds_get(context->ds, qual, ds_absolute_maximum);

	if (clear)
		memset((void *)base, 0, end - base);

	ds_set(context->ds, qual, ds_index, base);

	error = 0;
 out:
	ds_put_context(context);
	return error;
}

int ds_reset_bts(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
}

int ds_reset_pebs(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
}

int ds_clear_bts(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
}

int ds_clear_pebs(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
}
int ds_get_pebs_reset(struct task_struct *task, u64 *value)
{
	struct ds_context *context;
	int error;

	if (!value)
		return -EINVAL;

	context = ds_get_context(task);
	error = ds_validate_access(context, ds_pebs);
	if (error < 0)
		goto out;

	/* the counter reset value is the 9th field (index 8) */
	*value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));

	error = 0;
 out:
	ds_put_context(context);
	return error;
}

int ds_set_pebs_reset(struct task_struct *task, u64 value)
{
	struct ds_context *context;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, ds_pebs);
	if (error < 0)
		goto out;

	*(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;

	error = 0;
 out:
	ds_put_context(context);
	return error;
}
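
/*
 * Example (sketch; the negated-period idiom is an assumption about
 * how the reset value is used, not taken from this file): re-arm
 * PEBS so that roughly `period` events occur before the next PEBS
 * record. The counter counts upward and records on overflow, hence
 * the negated period as reset value.
 */
static inline int ds_example_set_pebs_period(u64 period)
{
	return ds_set_pebs_reset(current, (u64)-(s64)period);
}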
static const struct ds_configuration ds_cfg_var = {
	.sizeof_ds    = sizeof(long) * 12,
	.sizeof_field = sizeof(long),
	.sizeof_rec[ds_bts]  = sizeof(long) * 3,
	.sizeof_rec[ds_pebs] = sizeof(long) * 10
};
static const struct ds_configuration ds_cfg_64 = {
	.sizeof_ds    = 8 * 12,
	.sizeof_field = 8,
	.sizeof_rec[ds_bts]  = 8 * 3,
	.sizeof_rec[ds_pebs] = 8 * 10
};
static inline void
ds_configure(const struct ds_configuration *cfg)
{
	ds_cfg = *cfg;
}
void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0xD:
		case 0xE: /* Pentium M */
			ds_configure(&ds_cfg_var);
			break;
		case 0xF: /* Core2 */
		case 0x1C: /* Atom */
			ds_configure(&ds_cfg_64);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	case 0xF:
		switch (c->x86_model) {
		case 0x0:
		case 0x1:
		case 0x2: /* Netburst */
			ds_configure(&ds_cfg_var);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}
void ds_free(struct ds_context *context)
{
	/* This is called when the task owning the parameter context
	 * is dying. There should be no users of that context left
	 * to disturb us anymore. */
	unsigned long leftovers = context->count;
	while (leftovers--)
		ds_put_context(context);
}
#endif /* CONFIG_X86_DS */