1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * Predicate used by trace_test_buffer_cpu(): non-zero when @entry is a
 * recognized entry type (it is tested with `!trace_valid_entry(...)` below).
 * NOTE(review): the body was dropped by the extraction -- presumably a
 * switch on entry->type; confirm against the full trace.c source.
 */
6 static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * Sanity-check one CPU's trace buffer: the head page must be the first
 * page on data->trace_pages, every populated entry must satisfy
 * trace_valid_entry(), and the page list must hold exactly tr->entries
 * entries (neither short nor long). Callers treat a non-zero return as
 * "corrupted buffer".
 *
 * NOTE(review): this excerpt is missing lines (opening/closing braces,
 * `return` statements, the idx reset/increment, declarations of `page`,
 * `i`, `idx`). Do not modify from this view alone.
 */
18 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
20 	struct trace_entry *entries;
/* The page list must be non-empty before we dereference its head. */
25 	BUG_ON(list_empty(&data->trace_pages));
26 	page = list_entry(data->trace_pages.next, struct page, lru);
27 	entries = page_address(page);
/* head_page() must agree with the first page on the list. */
29 	if (head_page(data) != entries)
33 	 * The starting trace buffer always has valid elements,
34 	 * if any element exists.
36 	entries = head_page(data);
38 	for (i = 0; i < tr->entries; i++) {
/* Only the first data->trace_idx slots are populated; validate those. */
40 		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
41 			printk(KERN_CONT ".. invalid entry %d ",
/* Crossed a page boundary: advance to the next page in the ring. */
47 		if (idx >= ENTRIES_PER_PAGE) {
48 			page = virt_to_page(entries);
49 			if (page->lru.next == &data->trace_pages) {
/* Ran out of pages before i reached tr->entries - 1: size mismatch. */
50 				if (i != tr->entries - 1) {
51 					printk(KERN_CONT ".. entries buffer mismatch");
55 			page = list_entry(page->lru.next, struct page, lru);
56 			entries = page_address(page);
/* After the walk, the current page must be the last one on the list. */
62 	page = virt_to_page(entries);
63 	if (page->lru.next != &data->trace_pages) {
64 		printk(KERN_CONT ".. too many entries");
/* Common failure tail (reached via goto in the full source, presumably). */
73 	printk(KERN_CONT ".. corrupted trace buffer .. ");
/*
 * Validate every per-CPU buffer of @tr via trace_test_buffer_cpu() and,
 * when @count is non-NULL, report the total number of entries found.
 * NOTE(review): excerpt is missing the brace/return lines and the
 * declarations of `cpu` and `ret`; `*count = cnt` presumably happens in
 * the dropped tail -- confirm against the full file.
 */
 * Test the trace buffer to see if all the elements
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
	unsigned long cnt = 0;
	for_each_possible_cpu(cpu) {
/* Skip CPUs that never recorded anything. */
		if (!head_page(tr->data[cpu]))
			cnt += tr->data[cpu]->trace_idx;
		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * Probe function for the dynamic-ftrace filter test below: its only job
 * is to exist as a traceable symbol whose name (via STR()) can be fed to
 * ftrace_set_filter().
 * NOTE(review): the companion `#define __STR(x) #x` stringify helper and
 * the function body/return are missing from this excerpt.
 */
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define STR(x) __STR(x)
static int DYN_FTRACE_TEST_NAME(void)
	/* used to call mcount */
/* Test dynamic code modification and ftrace filters */
/*
 * Dynamic-ftrace self-test: filter tracing down to the single probe
 * function DYN_FTRACE_TEST_NAME, verify the buffer stays empty while it
 * is not called, then call it once and verify exactly one entry lands
 * in the buffer. Saves and restores ftrace_enabled/tracer_enabled and
 * clears the filter on exit so later tests see a clean state.
 *
 * NOTE(review): excerpt is missing the third parameter, braces, the
 * ctrl_start/msleep/probe-call lines, and all `return`/goto-out paths.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	/* passed in by parameter to fool gcc from optimizing */

	/* update the records */
	ret = ftrace_force_update();
		printk(KERN_CONT ".. ftraced failed .. ");

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* Sleep for a 1/10 of a second */
	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
		printk(KERN_CONT ".. filter did not filter .. ");

	/* call our function again */
	/* stop the tracing. */
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed ..");

	/* Restore global state saved on entry. */
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);
/* Without CONFIG_DYNAMIC_FTRACE the dynamic test is a no-op returning 0. */
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
/*
 * Fails if the buffer test fails or if no entries were recorded; on
 * success it chains into the dynamic-tracing test. Restores the saved
 * ftrace_enabled/tracer_enabled state before returning.
 * NOTE(review): excerpt is missing braces, ctrl_start/msleep lines,
 * declarations of `ret`/`count`, and the return paths.
 */
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
		printk(KERN_CONT ".. ftraced failed .. ");

	/* start the tracing */
	/* Sleep for a 1/10 of a second */
	/* stop the tracing. */
	trace->ctrl_update(tr);

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	/* An empty buffer after 100ms of function tracing means failure. */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* Chain into the dynamic-ftrace filter test (no-op if disabled). */
	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * irqsoff tracer self-test: reset the max-latency record, disable
 * interrupts briefly, then verify both the live buffer and the
 * max-latency snapshot (max_tr) hold sane, non-empty traces. The saved
 * tracing_max_latency is restored on exit.
 * NOTE(review): excerpt is missing braces, the ctrl_start and
 * local_irq_disable/enable lines, and the return paths.
 */
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;

	/* start the tracing */
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	/* stop the tracing. */
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* The max-latency snapshot must have captured something. */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	tracing_max_latency = save_max;
#ifdef CONFIG_PREEMPT_TRACER
/*
 * preemptoff tracer self-test: same shape as the irqsoff test above,
 * but the critical section disables preemption rather than interrupts.
 * Restores the saved tracing_max_latency on exit.
 * NOTE(review): excerpt is missing braces, the ctrl_start and
 * preempt_disable/enable lines, and the return paths.
 */
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;

	/* start the tracing */
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	/* stop the tracing. */
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* The max-latency snapshot must have captured something. */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	tracing_max_latency = save_max;
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Combined preempt+irqs-off tracer self-test. Runs the critical section
 * twice -- once disabling preemption first then irqs, once in the
 * reverse order -- checking both the live buffer and the max_tr
 * snapshot after each pass. Restores tracing_max_latency on exit.
 * NOTE(review): excerpt is missing braces, the ctrl_start,
 * preempt_disable/local_irq_disable pairs, and all return/out paths.
 */
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;

	/* start the tracing */
	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	/* reverse the order of preempt vs irqs */
	/* stop the tracing. */
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	/* First pass: the snapshot must be non-empty. */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	trace->ctrl_update(tr);

	/* reverse the order of preempt vs irqs */
	/* stop the tracing. */
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);
	/* Second pass: the snapshot must again be non-empty. */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");

	tracing_max_latency = save_max;
#ifdef CONFIG_SCHED_TRACER
/*
 * kthread body for the wakeup-latency test: boost itself to an RT
 * priority, signal readiness via the completion passed in @data, sleep
 * interruptibly until the test wakes it, then idle in short sleeps
 * until kthread_stop() is called.
 * NOTE(review): excerpt is missing the complete()/schedule() calls,
 * the loop body, and the return -- confirm against the full file.
 */
static int trace_wakeup_test_thread(void *data)
	struct completion *x = data;

	/* Make this a RT thread, doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Make it know we have a new prio */
	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		 * This is an RT task, do short sleeps to let
/*
 * Wakeup-latency tracer self-test: spawn an RT kthread, wait until it
 * has boosted its priority, reset the max-latency record, wake the
 * thread, and verify the max_tr snapshot captured the wakeup. The
 * thread is stopped and tracing_max_latency restored before checking
 * the entry count.
 * NOTE(review): excerpt is missing braces, the ctrl_start/msleep/
 * wake_up_process/kthread_stop lines, and the return paths.
 */
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.

	/* stop the tracing. */
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	ret = trace_test_buffer(&max_tr, &count);

	tracing_max_latency = save_max;

	/* kill the thread */
	/* The wakeup must have produced at least one snapshot entry. */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Context-switch tracer self-test: trace for ~100ms (the msleep itself
 * guarantees at least one context switch) and verify the buffer is
 * valid and non-empty.
 * NOTE(review): excerpt is missing braces, the ctrl_start/msleep
 * lines, declarations of `ret`/`count`, and the return paths.
 */
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
	/* start the tracing */
	/* Sleep for a 1/10 of a second */
	/* stop the tracing. */
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	/* 100ms with at least one msleep must yield entries. */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");