unsigned long __read_mostly    tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly    tracing_thresh;
 
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly =
+{
+       .name           = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
 static int tracing_disabled = 1;
 
 long
        return read;
 }
 
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       char buf[64];
+       int r;
+
+       r = sprintf(buf, "%lu\n", tr->entries);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+                     size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       char buf[64];
+
+       if (cnt >= sizeof(buf))
+               cnt = sizeof(buf) - 1;
+
+       if (copy_from_user(buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       val = simple_strtoul(buf, NULL, 10);
+
+       /* must have at least 1 entry */
+       if (!val)
+               return -EINVAL;
+
+       mutex_lock(&trace_types_lock);
+
+       if (current_trace != &no_tracer) {
+               cnt = -EBUSY;
+               pr_info("ftrace: set current_tracer to none"
+                       " before modifying buffer size\n");
+               goto out;
+       }
+
+       if (val > global_trace.entries) {
+               while (global_trace.entries < val) {
+                       if (trace_alloc_page()) {
+                               cnt = -ENOMEM;
+                               goto out;
+                       }
+               }
+       } else {
+       /* shrink, rounding val up to a whole page worth of entries */
+               while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+                       trace_free_page();
+       }
+
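+       /* advance the file position only when the update succeeded */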
+       filp->f_pos += cnt;
+
+ out:
+       max_tr.entries = global_trace.entries;
+       mutex_unlock(&trace_types_lock);
+
+       return cnt;
+}
+
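The shrink path above stops freeing once at most one page's worth of slack remains above the requested count, so a request is effectively rounded up to a whole page of entries. A minimal sketch of the equivalent arithmetic; round_to_page_entries is a hypothetical helper, not part of the patch, and entries_per_page stands in for ENTRIES_PER_PAGE from trace.c:

#include <stdio.h>

/*
 * Mirrors the effect of the shrink loop
 *         while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
 *                 trace_free_page();
 * given that entries always starts as a multiple of entries_per_page.
 */
static unsigned long round_to_page_entries(unsigned long val,
                                           unsigned long entries_per_page)
{
        return ((val + entries_per_page - 1) / entries_per_page)
                * entries_per_page;
}

int main(void)
{
        /* e.g. with 170 entries per page, requesting 500 keeps 510 */
        printf("%lu\n", round_to_page_entries(500, 170));
        return 0;
}

The grow path needs no explicit rounding: trace_alloc_page() is called until entries reaches at least val, so it overshoots by less than one page for the same reason.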
 static struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
        .write          = tracing_max_lat_write,
 };
 
+static struct file_operations tracing_entries_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_entries_read,
+       .write          = tracing_entries_write,
+};
+
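With the read and write handlers wired into tracing_entries_fops, the new file can be driven from userspace once the debugfs entry is created below. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug (older trees mounted it at /debug); the path and the 65536 figure are illustrative only:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/tracing/trace_entries";
        FILE *f = fopen(path, "r+");
        unsigned long entries;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%lu", &entries) == 1)
                printf("entries per CPU: %lu\n", entries);

        /* the write fails with EBUSY unless current_tracer is "none" */
        rewind(f);
        if (fprintf(f, "65536") < 0 || fflush(f) != 0)
                perror("resize");

        fclose(f);
        return 0;
}

Requiring the dummy "none" tracer before a resize is the simplest way to guarantee that no CPU is concurrently logging into pages that are about to be freed.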
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static ssize_t
                pr_warning("Could not create debugfs "
                           "'tracing_threash' entry\n");
 
+       entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+                                   &global_trace, &tracing_entries_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'trace_entries' entry\n");
+
 #ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
 #endif
 }
 
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
-       .name           = "none",
-};
-
 static int trace_alloc_page(void)
 {
        struct trace_array_cpu *data;
        /* Now that we successfully allocate a page per CPU, add them */
        for_each_possible_cpu(i) {
                data = global_trace.data[i];
-               data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
-               data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
        return -ENOMEM;
 }
 
+static int trace_free_page(void)
+{
+       struct trace_array_cpu *data;
+       struct page *page;
+       struct list_head *p;
+       int i;
+       int ret = 0;
+
+       /* free one page from each buffer */
+       for_each_possible_cpu(i) {
+               data = global_trace.data[i];
+               p = data->trace_pages.next;
+               if (p == &data->trace_pages) {
+                       /* should never happen */
+                       WARN_ON(1);
+                       tracing_disabled = 1;
+                       ret = -1;
+                       break;
+               }
+               page = list_entry(p, struct page, lru);
+               ClearPageLRU(page);
+               list_del(&page->lru);
+               __free_page(page);
+
+               tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+               data = max_tr.data[i];
+               p = data->trace_pages.next;
+               if (p == &data->trace_pages) {
+                       /* should never happen */
+                       WARN_ON(1);
+                       tracing_disabled = 1;
+                       ret = -1;
+                       break;
+               }
+               page = list_entry(p, struct page, lru);
+               ClearPageLRU(page);
+               list_del(&page->lru);
+               __free_page(page);
+
+               tracing_reset(data);
+#endif
+       }
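+       /*
+        * entries counts per-CPU entries; taking one page off every
+        * buffer shrinks that count by a single page's worth.
+        */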
+       global_trace.entries -= ENTRIES_PER_PAGE;
+
+       return ret;
+}
+
 __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
                /* use the LRU flag to differentiate the two buffers */
                ClearPageLRU(page);
 
+               data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+               max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
 /* Only allocate if we are actually using the max trace */
 #ifdef CONFIG_TRACER_MAX_TRACE
                array = (void *)__get_free_page(GFP_KERNEL);