*     * A function has a regular function arguments
*     * prototype, declare it via TP_PROTO():
  *     *
  *
- *     TP_PROTO(struct rq *rq, struct task_struct *prev,
- *              struct task_struct *next),
+ *     TP_PROTO(struct rq *rq, struct task_struct *prev,
+ *              struct task_struct *next),
  *
  *     *
  *     * Define the call signature of the 'function'.
  *     * (Wrapping the full argument list this way avoids introducing
  *     *  TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
  *     *
  *
- *     TP_ARGS(rq, prev, next),
+ *     TP_ARGS(rq, prev, next),
  *
  *     *
  *     * Fast binary tracing: define the trace record via
  *     * TP_STRUCT__entry() and fill it in via TP_fast_assign().
  *     * Note: this C code executes every time a trace event
  *     * happens, on an active tracepoint.
  *     *
  *
- *     TP_fast_assign(
- *             memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
- *             __entry->prev_pid       = prev->pid;
- *             __entry->prev_prio      = prev->prio;
+ *     TP_fast_assign(
+ *             memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ *             __entry->prev_pid       = prev->pid;
+ *             __entry->prev_prio      = prev->prio;
  *             memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
  *             __entry->next_pid       = next->pid;
- *             __entry->next_prio      = next->prio;
+ *             __entry->next_prio      = next->prio;
  *     )
  *
  *     *
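 *     * Putting the pieces above together: a complete definition is a
 *     * single TRACE_EVENT() invocation. The sketch below is
 *     * illustrative only: the TP_STRUCT__entry() fields are inferred
 *     * from the TP_fast_assign() block above, and the TP_printk()
 *     * format string is an assumption, not part of this patch.
 *     *
 *
 *     TRACE_EVENT(sched_switch,
 *
 *             TP_PROTO(struct rq *rq, struct task_struct *prev,
 *                      struct task_struct *next),
 *
 *             TP_ARGS(rq, prev, next),
 *
 *             TP_STRUCT__entry(
 *                     __array(char,  prev_comm, TASK_COMM_LEN)
 *                     __field(pid_t, prev_pid)
 *                     __field(int,   prev_prio)
 *                     __array(char,  next_comm, TASK_COMM_LEN)
 *                     __field(pid_t, next_pid)
 *                     __field(int,   next_prio)
 *             ),
 *
 *             TP_fast_assign(
 *                     memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
 *                     __entry->prev_pid       = prev->pid;
 *                     __entry->prev_prio      = prev->prio;
 *                     memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
 *                     __entry->next_pid       = next->pid;
 *                     __entry->next_prio      = next->prio;
 *             ),
 *
 *             TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
 *                     __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
 *                     __entry->next_comm, __entry->next_pid, __entry->next_prio)
 *     );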
 
 static int __read_mostly  blk_tracer_enabled;
 
 /* Select an alternative, minimalistic output rather than the original one */
-#define TRACE_BLK_OPT_CLASSIC  0x1
+#define TRACE_BLK_OPT_CLASSIC  0x1
 
 static struct tracer_opt blk_tracer_opts[] = {
        /* Default disable the minimalistic output */
 /**
  * blk_trace_ioctl - handle the ioctls associated with tracing
  * @bdev:      the block device
- * @cmd:       the ioctl cmd
+ * @cmd:       the ioctl cmd
  * @arg:       the argument data, if any
  *
  **/
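
/*
 * A condensed sketch of the dispatch this handler performs (illustrative
 * only: locking and error-path details are trimmed, and the exact body is
 * not part of this hunk):
 */
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	char b[BDEVNAME_SIZE];
	int ret, start = 0;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		/* allocate and configure a blk_trace for this queue */
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through: start and stop share the same helper */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}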
 
 static struct {
        const char *act[2];
-       int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
+       int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
 } what2act[] __read_mostly = {
-       [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
+       [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
        [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
        [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
        [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
 };
 
 static struct trace_event trace_blk_event = {
-       .type           = TRACE_BLK,
+       .type           = TRACE_BLK,
        .trace          = blk_trace_event_print,
        .binary         = blk_trace_event_print_binary,
 };
 
 
        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
-       entry->tgid                     = (tsk) ? tsk->tgid : 0;
+       entry->tgid                     = (tsk) ? tsk->tgid : 0;
        entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 
 
 
 static struct trace_event trace_branch_event = {
-       .type           = TRACE_BRANCH,
+       .type           = TRACE_BRANCH,
        .trace          = trace_branch_print,
 };
 
 
  *
  * static void ftrace_event_<call>(proto)
  * {
- *     event_trace_printk(_RET_IP_, "<call>: " <fmt>);
+ *     event_trace_printk(_RET_IP_, "<call>: " <fmt>);
  * }
  *
  * static int ftrace_reg_event_<call>(void)
  * {
- *     int ret;
+ *     int ret;
  *
- *     ret = register_trace_<call>(ftrace_event_<call>);
- *     if (ret)
- *             pr_info("event trace: Could not activate trace point "
- *                     "probe to <call>");
- *     return ret;
+ *     ret = register_trace_<call>(ftrace_event_<call>);
+ *     if (ret)
+ *             pr_info("event trace: Could not activate trace point "
+ *                     "probe to <call>");
+ *     return ret;
  * }
  *
  * static void ftrace_unreg_event_<call>(void)
  * {
- *     unregister_trace_<call>(ftrace_event_<call>);
+ *     unregister_trace_<call>(ftrace_event_<call>);
  * }
  *
  * For those macros defined with TRACE_FORMAT:
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
- *     .name                   = "<call>",
- *     .regfunc                = ftrace_reg_event_<call>,
- *     .unregfunc              = ftrace_unreg_event_<call>,
+ *     .name                   = "<call>",
+ *     .regfunc                = ftrace_reg_event_<call>,
+ *     .unregfunc              = ftrace_unreg_event_<call>,
  * }
  *
  *
  *
  * static void ftrace_raw_event_<call>(proto)
  * {
- *     struct ring_buffer_event *event;
- *     struct ftrace_raw_<call> *entry; <-- defined in stage 1
- *     unsigned long irq_flags;
- *     int pc;
- *
- *     local_save_flags(irq_flags);
- *     pc = preempt_count();
- *
- *     event = trace_current_buffer_lock_reserve(event_<call>.id,
- *                               sizeof(struct ftrace_raw_<call>),
- *                               irq_flags, pc);
- *     if (!event)
- *             return;
- *     entry   = ring_buffer_event_data(event);
- *
- *     <assign>;  <-- Here we assign the entries by the __field and
+ *     struct ring_buffer_event *event;
+ *     struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *     unsigned long irq_flags;
+ *     int pc;
+ *
+ *     local_save_flags(irq_flags);
+ *     pc = preempt_count();
+ *
+ *     event = trace_current_buffer_lock_reserve(event_<call>.id,
+ *                               sizeof(struct ftrace_raw_<call>),
+ *                               irq_flags, pc);
+ *     if (!event)
+ *             return;
+ *     entry   = ring_buffer_event_data(event);
+ *
+ *     <assign>;  <-- Here we assign the entries by the __field and
  *                     __array macros.
  *
- *     trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ *     trace_current_buffer_unlock_commit(event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(void)
  * {
- *     int ret;
+ *     int ret;
  *
- *     ret = register_trace_<call>(ftrace_raw_event_<call>);
- *     if (ret)
- *             pr_info("event trace: Could not activate trace point "
- *                     "probe to <call>");
- *     return ret;
+ *     ret = register_trace_<call>(ftrace_raw_event_<call>);
+ *     if (ret)
+ *             pr_info("event trace: Could not activate trace point "
+ *                     "probe to <call>");
+ *     return ret;
  * }
  *
  * static void ftrace_unreg_event_<call>(void)
  * {
- *     unregister_trace_<call>(ftrace_raw_event_<call>);
+ *     unregister_trace_<call>(ftrace_raw_event_<call>);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
- *     .trace                  = ftrace_raw_output_<call>, <-- stage 2
+ *     .trace                  = ftrace_raw_output_<call>, <-- stage 2
  * };
  *
  * static int ftrace_raw_init_event_<call>(void)
  * {
- *     int id;
+ *     int id;
  *
- *     id = register_ftrace_event(&ftrace_event_type_<call>);
- *     if (!id)
- *             return -ENODEV;
- *     event_<call>.id = id;
- *     return 0;
+ *     id = register_ftrace_event(&ftrace_event_type_<call>);
+ *     if (!id)
+ *             return -ENODEV;
+ *     event_<call>.id = id;
+ *     return 0;
  * }
  *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
- *     .name                   = "<call>",
+ *     .name                   = "<call>",
  *     .system                 = "<system>",
- *     .raw_init               = ftrace_raw_init_event_<call>,
- *     .regfunc                = ftrace_reg_event_<call>,
- *     .unregfunc              = ftrace_unreg_event_<call>,
+ *     .raw_init               = ftrace_raw_init_event_<call>,
+ *     .regfunc                = ftrace_reg_event_<call>,
+ *     .unregfunc              = ftrace_unreg_event_<call>,
  *     .show_format            = ftrace_format_<call>,
  * }
  *
 static struct ftrace_event_call __used                                 \
 __attribute__((__aligned__(4)))                                                \
 __attribute__((section("_ftrace_events"))) event_##call = {            \
-       .name                   = #call,                                \
+       .name                   = #call,                                \
        .system                 = __stringify(TRACE_SYSTEM),            \
        .regfunc                = ftrace_reg_event_##call,              \
        .unregfunc              = ftrace_unreg_event_##call,            \
        pc = preempt_count();                                           \
                                                                        \
        event = trace_current_buffer_lock_reserve(event_##call.id,      \
-                                 sizeof(struct ftrace_raw_##call),     \
+                                 sizeof(struct ftrace_raw_##call),     \
                                  irq_flags, pc);                       \
        if (!event)                                                     \
                return;                                                 \
 static struct ftrace_event_call __used                                 \
 __attribute__((__aligned__(4)))                                                \
 __attribute__((section("_ftrace_events"))) event_##call = {            \
-       .name                   = #call,                                \
+       .name                   = #call,                                \
        .system                 = __stringify(TRACE_SYSTEM),            \
        .raw_init               = ftrace_raw_init_event_##call,         \
        .regfunc                = ftrace_raw_reg_event_##call,          \
 
 static struct ftrace_event_call __used                                 \
 __attribute__((__aligned__(4)))                                                \
 __attribute__((section("_ftrace_events"))) event_##call = {            \
-       .name                   = #call,                                \
+       .name                   = #call,                                \
        .id                     = proto,                                \
        .system                 = __stringify(TRACE_SYSTEM),            \
        .show_format            = ftrace_format_##call,                 \
 
 }
 
 static struct tracer graph_trace __read_mostly = {
-       .name           = "function_graph",
+       .name           = "function_graph",
        .open           = graph_trace_open,
        .close          = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
-       .init           = graph_trace_init,
-       .reset          = graph_trace_reset,
+       .init           = graph_trace_init,
+       .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
 
 }
 
 static struct trace_event trace_fn_event = {
-       .type           = TRACE_FN,
+       .type           = TRACE_FN,
        .trace          = trace_fn_trace,
        .raw            = trace_fn_raw,
        .hex            = trace_fn_hex,
 }
 
 static struct trace_event trace_ctx_event = {
-       .type           = TRACE_CTX,
+       .type           = TRACE_CTX,
        .trace          = trace_ctx_print,
        .raw            = trace_ctx_raw,
        .hex            = trace_ctx_hex,
 };
 
 static struct trace_event trace_wake_event = {
-       .type           = TRACE_WAKE,
+       .type           = TRACE_WAKE,
        .trace          = trace_wake_print,
        .raw            = trace_wake_raw,
        .hex            = trace_wake_hex,
 }
 
 static struct trace_event trace_special_event = {
-       .type           = TRACE_SPECIAL,
+       .type           = TRACE_SPECIAL,
        .trace          = trace_special_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
 }
 
 static struct trace_event trace_stack_event = {
-       .type           = TRACE_STACK,
+       .type           = TRACE_STACK,
        .trace          = trace_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
 }
 
 static struct trace_event trace_user_stack_event = {
-       .type           = TRACE_USER_STACK,
+       .type           = TRACE_USER_STACK,
        .trace          = trace_user_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
 
 
 static struct trace_event trace_print_event = {
-       .type           = TRACE_PRINT,
+       .type           = TRACE_PRINT,
        .trace          = trace_print_print,
        .raw            = trace_print_raw,
 };
 
 /* Useful to know if we print the cpu headers */
        bool                        first_entry;
        int                         cpu;
-       pid_t                       pid;
+       pid_t                       pid;
 /* Can be updated from interrupt or user context, so it needs to be atomic */
-       atomic_t                    inserted;
+       atomic_t                    inserted;
 /*
  *  Doesn't need to be atomic: work items are serialized in a single
  *  workqueue thread on a single CPU.
  */
-       unsigned int                executed;
+       unsigned int                executed;
 };
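
/*
 * Sketch of how the two counters above are meant to be updated (the probe
 * names and the stats-struct name used here are illustrative assumptions,
 * they are not visible in this hunk):
 */
static void workqueue_insertion_sketch(struct cpu_workqueue_stats *cws)
{
	/* queueing can happen from IRQ or user context, hence the atomic op */
	atomic_inc(&cws->inserted);
}

static void workqueue_execution_sketch(struct cpu_workqueue_stats *cws)
{
	/* only ever run by the single workqueue thread bound to this CPU */
	cws->executed++;
}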
 
 /* List of workqueue threads on one cpu */