struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
        struct blk_io_trace *t;
-       unsigned long flags;
+       unsigned long flags = 0;
        unsigned long *sequence;
        pid_t pid;
        int cpu, pc = 0;
                tracing_record_cmdline(current);
 
                event = ring_buffer_lock_reserve(blk_tr->buffer,
-                                                sizeof(*t) + pdu_len, &flags);
+                                                sizeof(*t) + pdu_len);
                if (!event)
                        return;
 
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
                if (blk_tr) {
-                       ring_buffer_unlock_commit(blk_tr->buffer, event, flags);
+                       ring_buffer_unlock_commit(blk_tr->buffer, event);
                        if (pid != 0 &&
                            !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
                            (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-                               __trace_stack(blk_tr, flags, 5, pc);
+                               __trace_stack(blk_tr, 0, 5, pc);
                        trace_wake_up();
                        return;
                }
 
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-                        unsigned long length,
-                        unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+                                                  unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-                             struct ring_buffer_event *event,
-                             unsigned long flags);
+                             struct ring_buffer_event *event);
 int ring_buffer_write(struct ring_buffer *buffer,
                      unsigned long length, void *data);
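
With the flags argument gone, a caller simply reserves, fills the event, and commits; no interrupt-flags pointer is threaded through the calls. A minimal sketch of the new calling pattern, assuming a hypothetical struct my_entry and an already set-up buffer:

	struct ring_buffer_event *event;
	struct my_entry *entry;

	/* reserve space for one entry; NULL means nothing was allocated */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;

	/* get the writable body of the reserved event and fill it in */
	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* publish the entry and release any locks the ring buffer holds */
	ring_buffer_unlock_commit(buffer, event);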
 
 
        struct ring_buffer_event *event;
        struct kmemtrace_alloc_entry *entry;
        struct trace_array *tr = kmemtrace_array;
-       unsigned long irq_flags;
 
        if (!kmem_tracing_enabled)
                return;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->gfp_flags = gfp_flags;
        entry->node     =       node;
 
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();
 }
        struct ring_buffer_event *event;
        struct kmemtrace_free_entry *entry;
        struct trace_array *tr = kmemtrace_array;
-       unsigned long irq_flags;
 
        if (!kmem_tracing_enabled)
                return;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->call_site = call_site;
        entry->ptr = ptr;
 
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();
 }
 
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
  * @length: the length of the data to reserve (excluding event header)
- * @flags: a pointer to save the interrupt flags
  *
  * Returns a reserved event on the ring buffer to copy directly to.
  * The user of this interface will need to get the body to write into,
  * which can be obtained with ring_buffer_event_data().
  * If NULL is returned, then nothing has been allocated or locked.
  */
 struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-                        unsigned long length,
-                        unsigned long *flags)
+ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
  * ring_buffer_unlock_commit - commit a reserved event
  * @buffer: The buffer to commit to
  * @event: The event pointer to commit.
- * @flags: the interrupt flags received from ring_buffer_lock_reserve.
  *
  * This commits the data to the ring buffer, and releases any locks held.
  *
  * Must be paired with ring_buffer_lock_reserve.
  */
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-                             struct ring_buffer_event *event,
-                             unsigned long flags)
+                             struct ring_buffer_event *event)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu = raw_smp_processor_id();
 
 {
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
-       unsigned long irq_flags;
 
        /* If we are reading the ring buffer, don't trace */
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ent.type                 = TRACE_FN;
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 {
        struct ring_buffer_event *event;
        struct ftrace_graph_ent_entry *entry;
-       unsigned long irq_flags;
 
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;
 
-       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_GRAPH_ENT;
        entry->graph_ent                        = *trace;
-       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+       ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 
 static void __trace_graph_return(struct trace_array *tr,
 {
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *entry;
-       unsigned long irq_flags;
 
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;
 
-       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_GRAPH_RET;
        entry->ret                              = *trace;
-       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+       ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;
-       unsigned long irq_flags;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        trace.entries           = entry->caller;
 
        save_stack_trace(&trace);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
        struct stack_trace trace;
-       unsigned long irq_flags;
 
        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        trace.entries           = entry->caller;
 
        save_stack_trace_user(&trace);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
        struct ring_buffer_event *event;
        struct trace_array *tr = __tr;
        struct special_entry *entry;
-       unsigned long irq_flags;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->arg1                     = arg1;
        entry->arg2                     = arg2;
        entry->arg3                     = arg3;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       ftrace_trace_stack(tr, irq_flags, 4, pc);
-       ftrace_trace_userstack(tr, irq_flags, pc);
+       ring_buffer_unlock_commit(tr->buffer, event);
+       ftrace_trace_stack(tr, 0, 4, pc);
+       ftrace_trace_userstack(tr, 0, pc);
 
        trace_wake_up();
 }
 {
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
-       unsigned long irq_flags;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
        ftrace_trace_stack(tr, flags, 5, pc);
        ftrace_trace_userstack(tr, flags, pc);
 }
 {
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
-       unsigned long irq_flags;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
        ftrace_trace_stack(tr, flags, 6, pc);
        ftrace_trace_userstack(tr, flags, pc);
 
        trace_buf[len] = 0;
 
        size = sizeof(*entry) + len + 1;
-       event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, size);
        if (!event)
                goto out_unlock;
        entry = ring_buffer_event_data(event);
 
        memcpy(&entry->buf, trace_buf, len);
        entry->buf[len] = 0;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
  out_unlock:
        spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
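
Note that irq_flags survives above only for the caller's own trace_buf_lock; it is no longer passed to the ring buffer. A sketch of that pattern, with my_lock, my_buffer and struct my_entry as hypothetical placeholders:

	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long irq_flags;

	/* the caller still saves/restores flags for its own spinlock */
	spin_lock_irqsave(&my_lock, irq_flags);

	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	/* ... fill in entry ... */
	ring_buffer_unlock_commit(my_buffer, event);

 out_unlock:
	spin_unlock_irqrestore(&my_lock, irq_flags);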
 
 {
        struct ring_buffer_event *event;
        struct trace_boot_call *entry;
-       unsigned long irq_flags;
        struct trace_array *tr = boot_trace;
 
        if (!tr || !pre_initcalls_finished)
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT_CALL;
        entry->boot_call = *bt;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();
 
 {
        struct ring_buffer_event *event;
        struct trace_boot_ret *entry;
-       unsigned long irq_flags;
        struct trace_array *tr = boot_trace;
 
        if (!tr || !pre_initcalls_finished)
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT_RET;
        entry->boot_ret = *bt;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();
 
 
        struct trace_array *tr = branch_tracer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
-       unsigned long flags, irq_flags;
+       unsigned long flags;
        int cpu, pc;
        const char *p;
 
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                goto out;
 
        entry->line = f->line;
        entry->correct = val == expect;
 
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
        atomic_dec(&tr->data[cpu]->disabled);
 
        struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
        struct hw_branch_entry *entry;
-       unsigned long irq1, irq2;
+       unsigned long irq1;
        int cpu;
 
        if (unlikely(!tr))
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        entry->ent.cpu = cpu;
        entry->from = from;
        entry->to   = to;
-       ring_buffer_unlock_commit(tr->buffer, event, irq2);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
        atomic_dec(&tr->data[cpu]->disabled);
 
 {
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
-       unsigned long irq_flags;
 
-       event   = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event   = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        tracing_generic_entry_update(&entry->ent, 0, preempt_count());
        entry->ent.type                 = TRACE_MMIO_RW;
        entry->rw                       = *rw;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();
 }
 {
        struct ring_buffer_event *event;
        struct trace_mmiotrace_map *entry;
-       unsigned long irq_flags;
 
-       event   = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                          &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        tracing_generic_entry_update(&entry->ent, 0, preempt_count());
        entry->ent.type                 = TRACE_MMIO_MAP;
        entry->map                      = *map;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();
 }
 
        struct ring_buffer_event *event;
        struct trace_power *entry;
        struct trace_array_cpu *data;
-       unsigned long irq_flags;
        struct trace_array *tr = power_trace;
 
        if (!trace_power_enabled)
        it->end = ktime_get();
        data = tr->data[smp_processor_id()];
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_POWER;
        entry->state_data = *it;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();
 
        struct ring_buffer_event *event;
        struct trace_power *entry;
        struct trace_array_cpu *data;
-       unsigned long irq_flags;
        struct trace_array *tr = power_trace;
 
        if (!trace_power_enabled)
        it->end = it->stamp;
        data = tr->data[smp_processor_id()];
 
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-                                        &irq_flags);
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_POWER;
        entry->state_data = *it;
-       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+       ring_buffer_unlock_commit(tr->buffer, event);
 
        trace_wake_up();