diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 8228b57eb18f544aebec20063b18c91ab5d1e323..37f6cb11212723f7f2094a16b6cec92bbe9ef670 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/wait.h>
+#include <linux/vmalloc.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_sock.h>
-#include <net/route.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 
@@ -38,7 +38,6 @@
  * I'll experiment with dynamic table growth later.
  */
 struct inet_ehash_bucket {
-       rwlock_t          lock;
        struct hlist_head chain;
        struct hlist_head twchain;
 };
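
Dropping the rwlock_t from inet_ehash_bucket is first of all a memory win: the established-connections table is sized from available RAM and commonly runs to a million or more buckets, so a lock embedded in every bucket scales lock storage with the table itself. The striped lock array introduced further down is capped at a few thousand entries regardless of table size. A back-of-the-envelope comparison (a minimal sketch; the 8-byte rwlock_t and the 2^20-bucket table are assumptions for illustration, not values taken from the patch):

#include <stdio.h>

/* Illustrative numbers only: sizeof(rwlock_t) varies with config and
 * architecture, and the ehash size depends on boot-time memory. */
#define EHASH_BUCKETS	(1u << 20)	/* hypothetical table size */
#define RWLOCK_BYTES	8u		/* hypothetical sizeof(rwlock_t) */
#define STRIPED_LOCKS	4096u		/* the patch's cap for >= 32 CPUs */

int main(void)
{
	printf("per-bucket locks: %u KB\n",
	       EHASH_BUCKETS * RWLOCK_BYTES / 1024);	/* 8192 KB */
	printf("striped locks:    %u KB\n",
	       STRIPED_LOCKS * RWLOCK_BYTES / 1024);	/* 32 KB */
	return 0;
}

Fewer locks than buckets is safe because a bucket never needs a private lock, only a consistent one: every path that touches a given chain must derive the same lock, which the mask scheme below guarantees.
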
@@ -101,6 +100,9 @@ struct inet_hashinfo {
         * TIME_WAIT sockets use a separate chain (twchain).
         */
        struct inet_ehash_bucket        *ehash;
+       rwlock_t                        *ehash_locks;
+       unsigned int                    ehash_size;
+       unsigned int                    ehash_locks_mask;
 
        /* Ok, let's try this, I give up, we do need a local binding
         * TCP hash as well as the others for fast bind/connect.
@@ -108,7 +110,7 @@ struct inet_hashinfo {
        struct inet_bind_hashbucket     *bhash;
 
        unsigned int                    bhash_size;
-       unsigned int                    ehash_size;
+       /* Note : 4 bytes padding on 64 bit arches */
 
        /* All sockets in TCP_LISTEN state will be in here.  This is the only
         * table where wildcard'd TCP sockets can exist.  Hash function here
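
The ehash_size member has moved up beside ehash and ehash_locks, and the comment left in its place records the consequence: bhash_size is now a lone 4-byte field in front of pointer-aligned members, so 64-bit ABIs leave a 4-byte hole after it. A toy layout (illustrative only, not the real inet_hashinfo) makes the hole visible:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for the relevant slice of inet_hashinfo. */
struct toy {
	void		*bhash;		/* offset 0 on LP64 */
	unsigned int	 bhash_size;	/* offset 8, 4 bytes */
	/* 4 bytes of padding here on 64-bit ABIs */
	void		*listening_hash;	/* offset 16 */
};

int main(void)
{
	/* Prints 8, 16, 24 on an LP64 machine: bytes 12-15 are padding. */
	printf("offsetof(bhash_size)     = %zu\n", offsetof(struct toy, bhash_size));
	printf("offsetof(listening_hash) = %zu\n", offsetof(struct toy, listening_hash));
	printf("sizeof(struct toy)       = %zu\n", sizeof(struct toy));
	return 0;
}
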
@@ -135,6 +137,61 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket(
        return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
 }
 
+static inline rwlock_t *inet_ehash_lockp(
+       struct inet_hashinfo *hashinfo,
+       unsigned int hash)
+{
+       return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
+}
+
+static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
+{
+       unsigned int i, size = 256;
+#if defined(CONFIG_PROVE_LOCKING)
+       unsigned int nr_pcpus = 2;
+#else
+       unsigned int nr_pcpus = num_possible_cpus();
+#endif
+       if (nr_pcpus >= 4)
+               size = 512;
+       if (nr_pcpus >= 8)
+               size = 1024;
+       if (nr_pcpus >= 16)
+               size = 2048;
+       if (nr_pcpus >= 32)
+               size = 4096;
+       if (sizeof(rwlock_t) != 0) {
+#ifdef CONFIG_NUMA
+               if (size * sizeof(rwlock_t) > PAGE_SIZE)
+                       hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
+               else
+#endif
+               hashinfo->ehash_locks = kmalloc(size * sizeof(rwlock_t),
+                                               GFP_KERNEL);
+               if (!hashinfo->ehash_locks)
+                       return ENOMEM;
+               for (i = 0; i < size; i++)
+                       rwlock_init(&hashinfo->ehash_locks[i]);
+       }
+       hashinfo->ehash_locks_mask = size - 1;
+       return 0;
+}
+
+static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
+{
+       if (hashinfo->ehash_locks) {
+#ifdef CONFIG_NUMA
+               unsigned int size = (hashinfo->ehash_locks_mask + 1) *
+                                                       sizeof(rwlock_t);
+               if (size > PAGE_SIZE)
+                       vfree(hashinfo->ehash_locks);
+               else
+#endif
+               kfree(hashinfo->ehash_locks);
+               hashinfo->ehash_locks = NULL;
+       }
+}
+
 extern struct inet_bind_bucket *
                    inet_bind_bucket_create(struct kmem_cache *cachep,
                                            struct inet_bind_hashbucket *head,
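
Taken together, the three helpers above are a classic lock-striping scheme: one small, power-of-two array of rwlocks is shared by all buckets and indexed with the same hash that selects the bucket. The array is sized from num_possible_cpus() and capped at 4096 entries; under CONFIG_PROVE_LOCKING the code pretends there are two CPUs because lockdep inflates sizeof(rwlock_t), and under CONFIG_NUMA the allocation falls back to vmalloc() once it outgrows a page. (Note that inet_ehash_locks_alloc() returns a positive ENOMEM on failure; its callers only test for a nonzero result.) A minimal user-space sketch of the same idea using POSIX rwlocks; every name in it is hypothetical, not kernel API:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical analogue of ehash_locks / ehash_locks_mask. */
struct striped_locks {
	pthread_rwlock_t *locks;
	unsigned int	  mask;		/* size - 1, size a power of two */
};

/* Mirrors inet_ehash_locks_alloc(): pick a power-of-two size that
 * grows with the CPU count but never exceeds 4096, then allocate
 * and initialize the whole array once. */
static int striped_locks_init(struct striped_locks *s, unsigned int ncpus)
{
	unsigned int size = 256, i;

	while (size < ncpus * 128 && size < 4096)
		size <<= 1;	/* rough stand-in for the patch's if-cascade */

	s->locks = malloc(size * sizeof(*s->locks));
	if (!s->locks)
		return -1;
	for (i = 0; i < size; i++)
		pthread_rwlock_init(&s->locks[i], NULL);
	s->mask = size - 1;
	return 0;
}

/* Mirrors inet_ehash_lockp(): a pure function of the hash, so every
 * access to a given bucket derives the same stripe. */
static pthread_rwlock_t *striped_lockp(struct striped_locks *s,
				       unsigned int hash)
{
	return &s->locks[hash & s->mask];
}

/* Mirrors inet_ehash_locks_free(). */
static void striped_locks_free(struct striped_locks *s)
{
	unsigned int i;

	for (i = 0; i <= s->mask; i++)
		pthread_rwlock_destroy(&s->locks[i]);
	free(s->locks);
	s->locks = NULL;
}

int main(void)
{
	struct striped_locks s;
	pthread_rwlock_t *lock;

	if (striped_locks_init(&s, 8))
		return 1;
	lock = striped_lockp(&s, 0xdeadbeef);	/* same hash -> same lock */
	pthread_rwlock_wrlock(lock);		/* writer side, cf. __inet_hash() */
	pthread_rwlock_unlock(lock);
	striped_locks_free(&s);
	return 0;
}

Unrelated buckets that happen to share a stripe merely contend a little more often; what matters is that an insert and a remove of the same socket, which both use sk->sk_hash, always serialize on the same rwlock.
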
@@ -223,7 +280,7 @@ static inline void __inet_hash(struct inet_hashinfo *hashinfo,
                sk->sk_hash = inet_sk_ehashfn(sk);
                head = inet_ehash_bucket(hashinfo, sk->sk_hash);
                list = &head->chain;
-               lock = &head->lock;
+               lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
                write_lock(lock);
        }
        __sk_add_node(sk, list);
@@ -254,7 +311,7 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
                inet_listen_wlock(hashinfo);
                lock = &hashinfo->lhash_lock;
        } else {
-               lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
+               lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
                write_lock_bh(lock);
        }
 
@@ -266,11 +323,6 @@ out:
                wake_up(&hashinfo->lhash_wait);
 }
 
-static inline int inet_iif(const struct sk_buff *skb)
-{
-       return ((struct rtable *)skb->dst)->rt_iif;
-}
-
 extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
                                           const __be32 daddr,
                                           const unsigned short hnum,
@@ -360,9 +412,10 @@ static inline struct sock *
         */
        unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
+       rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
 
        prefetch(head->chain.first);
-       read_lock(&head->lock);
+       read_lock(lock);
        sk_for_each(sk, node, &head->chain) {
                if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
                        goto hit; /* You sunk my battleship! */
@@ -375,7 +428,7 @@ static inline struct sock *
        }
        sk = NULL;
 out:
-       read_unlock(&head->lock);
+       read_unlock(lock);
        return sk;
 hit:
        sock_hold(sk);
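
This lookup path (the tail of __inet_lookup_established()) shows the read-side discipline the striping depends on: the hash is computed once, bucket and lock are both derived from that single value, and sock_hold() runs at the hit label, before read_unlock(), so the socket cannot be freed between lookup and use. Continuing the user-space sketch above (striped_lockp() is the hypothetical helper defined there):

/* Reader side, mirroring the lookup above: one hash computation,
 * one stripe derivation, and any reference on a matching entry is
 * taken inside the read-side critical section. */
int toy_lookup(struct striped_locks *s, unsigned int hash)
{
	pthread_rwlock_t *lock = striped_lockp(s, hash);
	int found = 0;

	pthread_rwlock_rdlock(lock);
	/* ... walk table[hash & table_mask].chain, then .twchain;
	 * on a match, take a reference before dropping the lock ... */
	pthread_rwlock_unlock(lock);
	return found;
}

The prefetch of head->chain.first is unchanged: bucket and stripe now live in separate cache lines, so warming the chain head while the lock is being acquired is still worthwhile.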