#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
+#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/system.h>
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
int rc = NET_RX_SUCCESS;
skb->dev = NULL;
-	bh_lock_sock(sk);
+	if (nested)
+		bh_lock_sock_nested(sk);
+	else
+		bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
/*
* trylock + unlock semantics:
*/
mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
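The new `nested` argument only selects the lockdep annotation; the actual locking behaviour is unchanged. A minimal sketch of the intended call pattern, with hypothetical caller names that are not part of this patch: pass nested == 1 only on a path that can run with another socket lock of the same lock class already held, so lockdep files the inner acquisition under SINGLE_DEPTH_NESTING instead of reporting a false recursive-lock deadlock.

#include <net/sock.h>

/* hypothetical: ordinary receive path, no same-class lock held */
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk_receive_skb(sk, skb, 0);
}

/* hypothetical: the parent (listening) socket's lock is already held,
 * e.g. while handing a packet to a not-yet-accepted child socket */
static int example_child_rcv(struct sock *child, struct sk_buff *skb)
{
	return sk_receive_skb(child, skb, 1);
}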
goto failure;
if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
- skb = alloc_skb(header_len, sk->sk_allocation);
+ skb = alloc_skb(header_len, gfp_mask);
if (skb) {
int npages;
int i;
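This hunk replaces sk->sk_allocation with a gfp_mask that is not declared anywhere in the excerpt. A sketch of the presumable setup earlier in sock_alloc_send_pskb(), assuming the mask is derived once from the socket's allocation mode; the __GFP_REPEAT strengthening is an assumption, not visible in this diff:

	gfp_t gfp_mask;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		/* sleeping allocation: may retry harder for multi-page skbs */
		gfp_mask |= __GFP_REPEAT;

Deriving the mask once keeps every allocation in the send loop consistent instead of re-reading sk->sk_allocation at each call site.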
atomic_set(&sk->sk_refcnt, 1);
}
-void fastcall lock_sock(struct sock *sk)
+void fastcall lock_sock_nested(struct sock *sk, int subclass)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
/*
* The sk_lock has mutex_lock() semantics here:
*/
- mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
local_bh_enable();
}
-EXPORT_SYMBOL(lock_sock);
+EXPORT_SYMBOL(lock_sock_nested);
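With the export renamed from lock_sock to lock_sock_nested, existing callers presumably keep compiling through a static inline wrapper; a sketch of what include/net/sock.h would carry, assuming subclass 0 remains the default:

extern void fastcall lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

A process-context caller that legitimately nests two socket locks would then take the inner one with lock_sock_nested(sk, SINGLE_DEPTH_NESTING) and release both through the unchanged release_sock().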
void fastcall release_sock(struct sock *sk)
{