diff --git a/mm/slub.c b/mm/slub.c
index bcdb2c8941a..903dabd92da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -172,7 +172,7 @@ static inline void ClearSlabDebug(struct page *page)
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 5
 
 /*
  * Maximum number of desirable partial slabs.
@@ -1080,7 +1080,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	struct kmem_cache_node *n;
 	void *start;
-	void *end;
 	void *last;
 	void *p;
 
@@ -1101,7 +1100,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		SetSlabDebug(page);
 
 	start = page_address(page);
-	end = start + s->objects * s->size;
 
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1511,26 +1509,8 @@ new_slab:
 
 	if (new) {
 		c = get_cpu_slab(s, smp_processor_id());
-		if (c->page) {
-			/*
-			 * Someone else populated the cpu_slab while we
-			 * enabled interrupts, or we have gotten scheduled
-			 * on another cpu. The page may not be on the
-			 * requested node even if __GFP_THISNODE was
-			 * specified. So we need to recheck.
-			 */
-			if (node_match(c, node)) {
-				/*
-				 * Current cpuslab is acceptable and we
-				 * want the current one since its cache hot
-				 */
-				discard_slab(s, new);
-				slab_lock(c->page);
-				goto load_freelist;
-			}
-			/* New slab does not fit our expectations */
+		if (c->page)
 			flush_slab(s, c);
-		}
 		slab_lock(new);
 		SetSlabFrozen(new);
 		c->page = new;
@@ -1633,7 +1613,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page);
+		add_partial_tail(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
 	slab_unlock(page);
@@ -2578,8 +2558,12 @@ size_t ksize(const void *object)
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
 
-	page = get_object_page(object);
+	page = virt_to_head_page(object);
 	BUG_ON(!page);
+
+	if (unlikely(!PageSlab(page)))
+		return PAGE_SIZE << compound_order(page);
+
 	s = page->slab;
 	BUG_ON(!s);
 
@@ -3092,6 +3076,19 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
@@ -3474,19 +3471,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	return n;
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 enum slab_stat_type {
 	SL_FULL,
 	SL_PARTIAL,
@@ -4139,3 +4123,82 @@ static int __init slab_sysfs_init(void)
 
 __initcall(slab_sysfs_init);
 #endif
+
+/*
+ * The /proc/slabinfo ABI
+ */
+#ifdef CONFIG_PROC_FS
+
+static void print_slabinfo_header(struct seq_file *m)
+{
+	seq_puts(m, "slabinfo - version: 2.1\n");
+	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
+		 "<objperslab> <pagesperslab>");
+	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+	seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+
+	down_read(&slub_lock);
+	if (!n)
+		print_slabinfo_header(m);
+
+	return seq_list_start(&slab_caches, *pos);
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+	up_read(&slub_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+	unsigned long nr_partials = 0;
+	unsigned long nr_slabs = 0;
+	unsigned long nr_inuse = 0;
+	unsigned long nr_objs;
+	struct kmem_cache *s;
+	int node;
+
+	s = list_entry(p, struct kmem_cache, list);
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+
+		if (!n)
+			continue;
+
+		nr_partials += n->nr_partial;
+		nr_slabs += atomic_long_read(&n->nr_slabs);
+		nr_inuse += count_partial(n);
+	}
+
+	nr_objs = nr_slabs * s->objects;
+	nr_inuse += (nr_slabs - nr_partials) * s->objects;
+
+	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
+		   nr_objs, s->size, s->objects, (1 << s->order));
+	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
+	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
+		   0UL);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+const struct seq_operations slabinfo_op = {
+	.start = s_start,
+	.next = s_next,
+	.stop = s_stop,
+	.show = s_show,
+};
+
+#endif /* CONFIG_PROC_FS */