diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index d0d45e2e0ab..7d43758dc24 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006 Paul Mundt
+ * Copyright (C) 2005, 2006, 2007 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -68,6 +68,32 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	p = &pmb_list;
+	while ((tmp = *p) != NULL)
+		p = &tmp->next;
+
+	pmbe->next = tmp;
+	*p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+		if (tmp == pmbe) {
+			*p = tmp->next;
+			return;
+		}
+}
+
 struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 			    unsigned long flags)
 {
@@ -81,11 +107,19 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->ppn = ppn;
 	pmbe->flags = flags;
 
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_add(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
 	return pmbe;
 }
 
 void pmb_free(struct pmb_entry *pmbe)
 {
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_del(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
 	kmem_cache_free(pmb_cache, pmbe);
 }
 
@@ -111,7 +145,7 @@ repeat:
 
 	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
 
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
 	/*
 	 * When we are in 32-bit address extended mode, CCR.CB becomes
 	 * invalid, so care must be taken to manually adjust cacheable
@@ -167,31 +201,6 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	clear_bit(entry, &pmb_map);
 }
 
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	p = &pmb_list;
-	while ((tmp = *p) != NULL)
-		p = &tmp->next;
-
-	pmbe->next = tmp;
-	*p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
-		if (tmp == pmbe) {
-			*p = tmp->next;
-			return;
-		}
-}
 
 static struct {
 	unsigned long size;
@@ -283,25 +292,14 @@ void pmb_unmap(unsigned long addr)
 	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
+			   unsigned long flags)
 {
 	struct pmb_entry *pmbe = pmb;
 
 	memset(pmb, 0, sizeof(struct pmb_entry));
 
-	spin_lock_irq(&pmb_list_lock);
-
 	pmbe->entry = PMB_NO_ENTRY;
-	pmb_list_add(pmbe);
-
-	spin_unlock_irq(&pmb_list_lock);
-}
-
-static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
-{
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_del(pmb);
-	spin_unlock_irq(&pmb_list_lock);
 }
 
 static int __init pmb_init(void)
@@ -311,9 +309,8 @@
 
 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
-	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry),
-				      0, 0, pmb_cache_ctor, pmb_cache_dtor);
-	BUG_ON(!pmb_cache);
+	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
+				      SLAB_PANIC, pmb_cache_ctor);
 
 	jump_to_P2();
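
For reference, the pmb_list_add()/pmb_list_del() helpers that this patch moves above pmb_alloc() use the classic pointer-to-pointer idiom for a singly linked list, which avoids tracking a separate "prev" node when unlinking. Below is a minimal, self-contained userspace sketch of that idiom; the struct is a mock reduced to its list linkage, the main() driver is invented for illustration, and the kernel code additionally serializes callers with pmb_list_lock, which this sketch omits.

	#include <stdio.h>

	/* Mock of the kernel's struct pmb_entry, reduced to the list linkage. */
	struct pmb_entry {
		unsigned long vpn;
		struct pmb_entry *next;
	};

	static struct pmb_entry *pmb_list;

	/* Walk the next pointers until *p is the NULL tail link, then append. */
	static void pmb_list_add(struct pmb_entry *pmbe)
	{
		struct pmb_entry **p, *tmp;

		p = &pmb_list;
		while ((tmp = *p) != NULL)
			p = &tmp->next;

		pmbe->next = tmp;	/* tmp is NULL here: new entry terminates the list */
		*p = pmbe;
	}

	/* Unlink without a prev pointer: *p always names the link to patch. */
	static void pmb_list_del(struct pmb_entry *pmbe)
	{
		struct pmb_entry **p, *tmp;

		for (p = &pmb_list; (tmp = *p); p = &tmp->next)
			if (tmp == pmbe) {
				*p = tmp->next;
				return;
			}
	}

	int main(void)
	{
		struct pmb_entry a = { .vpn = 1 }, b = { .vpn = 2 };

		pmb_list_add(&a);
		pmb_list_add(&b);
		pmb_list_del(&a);

		for (struct pmb_entry *e = pmb_list; e; e = e->next)
			printf("vpn=%lu\n", e->vpn);	/* prints: vpn=2 */
		return 0;
	}

Because p holds the address of the link being examined (either &pmb_list or some entry's &next), deletion rewrites that link directly with *p = tmp->next, so the head and interior cases need no special-casing.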