Pull platform-drivers into test branch

diff --git a/net/core/sock.c b/net/core/sock.c
index 51fcfbc041a73d38f0172a77c1e5e48b1224f0b7..0ed5b4f0bc407aef5b376149dcc2d49aa3c49937 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
 #include <linux/poll.h>
 #include <linux/tcp.h>
 #include <linux/init.h>
+#include <linux/highmem.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
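
Note: the new <linux/highmem.h> include pulls in the kmap()/kunmap() helpers for temporarily mapping pages that may live in highmem. As an illustration of that API only (this helper is hypothetical, not part of the patch):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy len bytes out of a page that may have no permanent mapping. */
    static void copy_out_of_page(struct page *page, void *dst, size_t len)
    {
            void *vaddr = kmap(page);       /* may sleep */

            memcpy(dst, vaddr, len);
            kunmap(page);                   /* drop the temporary mapping */
    }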
@@ -187,13 +188,13 @@ static struct lock_class_key af_callback_keys[AF_MAX];
 #define SK_RMEM_MAX            (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 
 /* Run time adjustable parameters. */
-__u32 sysctl_wmem_max = SK_WMEM_MAX;
-__u32 sysctl_rmem_max = SK_RMEM_MAX;
-__u32 sysctl_wmem_default = SK_WMEM_MAX;
-__u32 sysctl_rmem_default = SK_RMEM_MAX;
+__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 
 /* Maximal space eaten by iovec or ancillary data plus some space */
-int sysctl_optmem_max = sizeof(unsigned long)*(2*UIO_MAXIOV + 512);
+int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
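
__read_mostly places these tunables in a linker section of rarely-written data, so hot-path reads of the sysctl defaults do not false-share cache lines with write-heavy variables. The pattern in isolation (the variable below is illustrative, not from this patch):

    #include <linux/cache.h>

    /* Written only from the sysctl handler, read per packet: keep it
     * grouped with other read-mostly data. */
    static int example_budget __read_mostly = 300;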
@@ -247,11 +248,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                goto out;
        }
 
-       /* It would be deadlock, if sock_queue_rcv_skb is used
-          with socket lock! We assume that users of this
-          function are lock free.
-       */
-       err = sk_filter(sk, skb, 1);
+       err = sk_filter(sk, skb);
        if (err)
                goto out;
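
sk_filter() loses its needlock argument in this series: the filter pointer is now protected by RCU, so callers no longer pick between locked and lockless paths, and the stale deadlock warning above goes with it. A simplified sketch of the two-argument sk_filter() of this era (not the verbatim helper):

    static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
    {
            struct sk_filter *filter;
            int err;

            err = security_sock_rcv_skb(sk, skb);
            if (err)
                    return err;

            rcu_read_lock_bh();
            filter = rcu_dereference(sk->sk_filter);
            if (filter) {
                    unsigned int pkt_len = sk_run_filter(skb, filter->insns,
                                                         filter->len);
                    /* a zero return from the filter means "drop" */
                    err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
            }
            rcu_read_unlock_bh();

            return err;
    }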
 
@@ -274,16 +271,19 @@ out:
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 {
        int rc = NET_RX_SUCCESS;
 
-       if (sk_filter(sk, skb, 0))
+       if (sk_filter(sk, skb))
                goto discard_and_relse;
 
        skb->dev = NULL;
 
-       bh_lock_sock(sk);
+       if (nested)
+               bh_lock_sock_nested(sk);
+       else
+               bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                /*
                 * trylock + unlock semantics:
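
The new nested flag is for callers that legitimately hold two socket locks of the same address-family lock class at once, for example delivering to a child socket while the listening socket is locked; without the annotation, lockdep reports false recursive locking. In this tree bh_lock_sock_nested() is approximately:

    /* From include/net/sock.h (roughly): mark this as a deliberate
     * second acquisition within the same lock class. */
    #define bh_lock_sock_nested(__sk) \
            spin_lock_nested(&((__sk)->sk_lock.slock), SINGLE_DEPTH_NESTING)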
@@ -606,15 +606,15 @@ set_rcvbuf:
                        break;
 
                case SO_DETACH_FILTER:
-                       spin_lock_bh(&sk->sk_lock.slock);
-                       filter = sk->sk_filter;
+                       rcu_read_lock_bh();
+                       filter = rcu_dereference(sk->sk_filter);
                         if (filter) {
-                               sk->sk_filter = NULL;
-                               spin_unlock_bh(&sk->sk_lock.slock);
+                               rcu_assign_pointer(sk->sk_filter, NULL);
                                sk_filter_release(sk, filter);
+                               rcu_read_unlock_bh();
                                break;
                        }
-                       spin_unlock_bh(&sk->sk_lock.slock);
+                       rcu_read_unlock_bh();
                        ret = -ENONET;
                        break;
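
SO_DETACH_FILTER now unpublishes the filter with rcu_assign_pointer() instead of taking sk_lock.slock, matching the RCU readers in sk_filter(). The general retire pattern is sketched below; filter_rcu_free() is a hypothetical callback and the embedded rcu_head is an assumption, as the tree at this point relies on the refcount dropped by sk_filter_release() instead:

    struct sk_filter *old;

    rcu_read_lock_bh();
    old = rcu_dereference(sk->sk_filter);
    rcu_assign_pointer(sk->sk_filter, NULL);    /* readers now see no filter */
    rcu_read_unlock_bh();

    if (old)
            /* defer the free until all BH readers are done */
            call_rcu_bh(&old->rcu, filter_rcu_free);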
 
@@ -810,24 +810,11 @@ lenout:
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-       spin_lock_init(&sk->sk_lock.slock);
-       sk->sk_lock.owner = NULL;
-       init_waitqueue_head(&sk->sk_lock.wq);
-       /*
-        * Make sure we are not reinitializing a held lock:
-        */
-       debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-       /*
-        * Mark both the sk_lock and the sk_lock.slock as a
-        * per-address-family lock class:
-        */
-       lockdep_set_class_and_name(&sk->sk_lock.slock,
-                                  af_family_slock_keys + sk->sk_family,
-                                  af_family_slock_key_strings[sk->sk_family]);
-       lockdep_init_map(&sk->sk_lock.dep_map,
-                        af_family_key_strings[sk->sk_family],
-                        af_family_keys + sk->sk_family);
+       sock_lock_init_class_and_name(sk,
+                       af_family_slock_key_strings[sk->sk_family],
+                       af_family_slock_keys + sk->sk_family,
+                       af_family_key_strings[sk->sk_family],
+                       af_family_keys + sk->sk_family);
 }
 
 /**
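
The open-coded lockdep setup moves into sock_lock_init_class_and_name() so that protocols wanting a private lock class (AF_UNIX, for instance) can reuse it. In this tree the helper is a macro in include/net/sock.h expanding to roughly the code deleted here:

    #define sock_lock_init_class_and_name(sk, sname, skey, name, key)  \
    do {                                                                \
            (sk)->sk_lock.owner = NULL;                                 \
            init_waitqueue_head(&(sk)->sk_lock.wq);                     \
            spin_lock_init(&(sk)->sk_lock.slock);                       \
            debug_check_no_locks_freed((void *)&(sk)->sk_lock,          \
                                       sizeof((sk)->sk_lock));          \
            lockdep_set_class_and_name(&(sk)->sk_lock.slock,            \
                                       (skey), (sname));                \
            lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key));    \
    } while (0)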
@@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
                      struct proto *prot, int zero_it)
 {
        struct sock *sk = NULL;
-       kmem_cache_t *slab = prot->slab;
+       struct kmem_cache *slab = prot->slab;
 
        if (slab != NULL)
                sk = kmem_cache_alloc(slab, priority);
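
kmem_cache_t is a deprecated typedef being removed tree-wide; struct kmem_cache is the canonical spelling. Illustrative slab usage with the struct type and the six-argument kmem_cache_create() of this era (the cache name and object type are examples, not from this patch):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    struct example_obj { int val; };

    static struct kmem_cache *example_cachep;

    static int __init example_cache_init(void)
    {
            example_cachep = kmem_cache_create("example_cache",
                                               sizeof(struct example_obj),
                                               0, SLAB_HWCACHE_ALIGN,
                                               NULL, NULL);
            return example_cachep ? 0 : -ENOMEM;
    }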
@@ -884,10 +871,10 @@ void sk_free(struct sock *sk)
        if (sk->sk_destruct)
                sk->sk_destruct(sk);
 
-       filter = sk->sk_filter;
+       filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                sk_filter_release(sk, filter);
-               sk->sk_filter = NULL;
+               rcu_assign_pointer(sk->sk_filter, NULL);
        }
 
        sock_disable_timestamp(sk);
@@ -911,7 +898,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
        if (newsk != NULL) {
                struct sk_filter *filter;
 
-               memcpy(newsk, sk, sk->sk_prot->obj_size);
+               sock_copy(newsk, sk);
 
                /* SANITY */
                sk_node_init(&newsk->sk_node);
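
The raw memcpy() becomes sock_copy(), which keeps per-socket state that must not be cloned bitwise. In this tree sock_copy() looks roughly like this (the LSM security pointer survives the copy and is then re-initialized for the child):

    static void sock_copy(struct sock *nsk, const struct sock *osk)
    {
    #ifdef CONFIG_SECURITY_NETWORK
            void *sptr = nsk->sk_security;
    #endif
            memcpy(nsk, osk, osk->sk_prot->obj_size);
    #ifdef CONFIG_SECURITY_NETWORK
            nsk->sk_security = sptr;
            security_sk_clone(osk, nsk);
    #endif
    }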
@@ -1164,7 +1151,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
                        goto failure;
 
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
-                       skb = alloc_skb(header_len, sk->sk_allocation);
+                       skb = alloc_skb(header_len, gfp_mask);
                        if (skb) {
                                int npages;
                                int i;
@@ -1531,7 +1518,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        atomic_set(&sk->sk_refcnt, 1);
 }
 
-void fastcall lock_sock(struct sock *sk)
+void fastcall lock_sock_nested(struct sock *sk, int subclass)
 {
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
@@ -1542,11 +1529,11 @@ void fastcall lock_sock(struct sock *sk)
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
-       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+       mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
 }
 
-EXPORT_SYMBOL(lock_sock);
+EXPORT_SYMBOL(lock_sock_nested);
 
 void fastcall release_sock(struct sock *sk)
 {
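
lock_sock() itself survives as a wrapper that passes subclass 0, while the rare callers that nest two socket locks pass SINGLE_DEPTH_NESTING explicitly. Roughly, in include/net/sock.h:

    static inline void lock_sock(struct sock *sk)
    {
            lock_sock_nested(sk, 0);
    }

    /* a nested caller, e.g. locking a child while its parent is held: */
    lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);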