diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9e3a575b2c30272ced3b97c115fb2b87925db392..5046f90c11710178127a20a7bcb8677b4e80d943 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -47,7 +47,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
        spinlock_t list_lock;   /* Protect partial list and nr_partial */
        unsigned long nr_partial;
-       unsigned long min_partial;
        struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
@@ -90,6 +89,7 @@ struct kmem_cache {
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
+       unsigned long min_partial;
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
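These two hunks move min_partial out of the per-node struct kmem_cache_node and into struct kmem_cache itself: the minimum number of partial slabs to keep around is a per-cache tuning value, so one copy per cache suffices instead of one per NUMA node. A minimal sketch of a setter on the cache side, assuming the MIN_PARTIAL/MAX_PARTIAL bounds SLUB uses elsewhere (the helper name and the clamping are illustrative, not part of this header diff):

/*
 * Sketch: clamp and store the per-cache minimum number of partial
 * slabs.  set_min_partial() and MIN_PARTIAL/MAX_PARTIAL are assumed
 * here for illustration; only the s->min_partial field is in the diff.
 */
static void set_min_partial(struct kmem_cache *s, unsigned long min)
{
	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	s->min_partial = min;
}
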
@@ -130,9 +130,9 @@ struct kmem_cache {
  * This should be dropped to PAGE_SIZE / 2 once the page allocator
  * "fastpath" becomes competitive with the slab allocator fastpaths.
  */
-#define SLUB_MAX_SIZE (PAGE_SIZE)
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
 
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
 /*
  * We keep the general caches in an array of slab caches that are used for
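This hunk doubles the cutoff below which kmalloc() is served from SLUB's kmalloc caches rather than falling back to the page allocator: on a system with 4 KiB pages, requests up to 8 KiB now stay in SLUB. SLUB_PAGE_SHIFT sizes the array of kmalloc caches, so it has to grow in step: the largest cache holds objects of 1 << (SLUB_PAGE_SHIFT - 1) bytes, which must equal SLUB_MAX_SIZE. A small userspace check of that arithmetic (the PAGE_SHIFT value is assumed for illustration):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	(2 * PAGE_SIZE)
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)

int main(void)
{
	/* Largest size served by a kmalloc cache: 8192 bytes. */
	printf("SLUB_MAX_SIZE = %lu\n", SLUB_MAX_SIZE);
	/* Largest cache in the array: 1 << 13 = 8192, matching above. */
	printf("largest cache = %lu\n", 1UL << (SLUB_PAGE_SHIFT - 1));
	return 0;
}
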
@@ -233,8 +233,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
        unsigned int order = get_order(size);
        void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
 
-       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
-                            size, PAGE_SIZE << order, flags);
+       trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
 
        return ret;
 }
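Here the open-coded kmemtrace hook is replaced by the trace_kmalloc() tracepoint, passing the same five pieces of information: call-site IP, returned pointer, requested size, actually allocated size, and GFP flags. For these large allocations the allocated size is PAGE_SIZE << order, because get_order() rounds the request up to a power-of-two number of pages. A userspace illustration of that requested-vs-allocated gap (PAGE_SHIFT and this simplified get_order() are stand-ins for the kernel versions):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Simplified stand-in for the kernel's get_order(). */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long req = 3 * PAGE_SIZE;	/* bytes_req */
	unsigned int order = get_order(req);

	/* Prints: req=12288 order=2 alloc=16384 */
	printf("req=%lu order=%u alloc=%lu\n",
	       req, order, PAGE_SIZE << order);
	return 0;
}
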
@@ -255,9 +254,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 
                        ret = kmem_cache_alloc_notrace(s, flags);
 
-                       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-                                            _THIS_IP_, ret,
-                                            size, s->size, flags);
+                       trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
 
                        return ret;
                }
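The same conversion in the kmalloc() fast path. The allocation itself still goes through kmem_cache_alloc_notrace(), so the generic kmem_cache_alloc tracepoint does not also fire; only trace_kmalloc() reports the event, with bytes_req being the caller's size and bytes_alloc being s->size, the (possibly larger) object size of the backing cache. The _notrace variant is plausibly declared along these lines (a sketch; the exact #ifdef guard is assumed):

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
/* Without tracing support it simply falls back to kmem_cache_alloc(). */
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif
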
@@ -296,9 +293,8 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 
                ret = kmem_cache_alloc_node_notrace(s, flags, node);
 
-               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                         _THIS_IP_, ret,
-                                         size, s->size, flags, node);
+               trace_kmalloc_node(_THIS_IP_, ret,
+                                  size, s->size, flags, node);
 
                return ret;
        }
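And the NUMA-aware variant gets the same treatment, with trace_kmalloc_node() carrying one extra argument for the node. For reference, the tracepoints consumed by these hunks could be declared roughly as follows (a sketch only: the exact header and macro spelling, TP_PROTO vs. the older TPPROTO, vary across kernel versions):

#include <linux/tracepoint.h>

/* Sketch of the kmalloc/kmalloc_node tracepoint declarations. */
DECLARE_TRACE(kmalloc,
	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));

DECLARE_TRACE(kmalloc_node,
	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
		 int node),
	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));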