*/
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
+#ifdef CONFIG_SECURITY_NETWORK
+static void unix_get_peersec_dgram(struct sk_buff *skb)
+{
+        int err;
+
+        err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb),
+                                               UNIXSECLEN(skb));
+        if (err)
+                *(UNIXSECDATA(skb)) = NULL;
+}
+
+static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{
+        scm->secdata = *UNIXSECDATA(skb);
+        scm->seclen = *UNIXSECLEN(skb);
+}
+#else
+static void unix_get_peersec_dgram(struct sk_buff *skb)
+{ }
+
+static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{ }
+#endif /* CONFIG_SECURITY_NETWORK */
+
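/*
 * Illustration only, not part of this patch: the blob captured by
 * unix_get_peersec_dgram() surfaces to userspace as ancillary data.
 * Assuming a kernel where this path is exposed via SO_PASSSEC and
 * SCM_SECURITY (both assumptions here, as are the fallback #defines
 * and the helper name), a receiver might look like:
 */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SO_PASSSEC
#define SO_PASSSEC 34                /* assumed: value from asm-generic/socket.h */
#endif
#ifndef SCM_SECURITY
#define SCM_SECURITY 0x03            /* assumed: value from linux/socket.h */
#endif

static void dump_peersec(int fd)
{
        char buf[256], cbuf[CMSG_SPACE(256)];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;
        int on = 1;

        /* ask the kernel to attach the sender's security context */
        setsockopt(fd, SOL_SOCKET, SO_PASSSEC, &on, sizeof(on));
        if (recvmsg(fd, &msg, 0) < 0)
                return;
        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_SECURITY)
                        printf("peer context: %.*s\n",
                               (int)(cmsg->cmsg_len - CMSG_LEN(0)),
                               (char *)CMSG_DATA(cmsg));
}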
/*
 * SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
        struct sock *sk = NULL;
        struct unix_sock *u;
-        if (atomic_read(&unix_nr_socks) >= 2*files_stat.max_files)
+        if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
                goto out;
        sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
        u->mnt = NULL;
        spin_lock_init(&u->lock);
        atomic_set(&u->inflight, sock ? 0 : -1);
-        init_MUTEX(&u->readsem); /* single task reading lock */
+        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
        unix_insert_socket(unix_sockets_unbound, sk);
out:
        struct unix_address * addr;
        int err;
-        down(&u->readsem);
+        mutex_lock(&u->readlock);
        err = 0;
        if (u->addr)
        spin_unlock(&unix_table_lock);
        err = 0;
-out:    up(&u->readsem);
+out:    mutex_unlock(&u->readlock);
        return err;
}
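/*
 * Illustration only, not part of this patch: the readsem -> readlock
 * hunks are a mechanical semaphore-to-mutex conversion. A minimal
 * sketch of the mutex API they convert to; the names below are made
 * up, only the mutex calls are real.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);      /* static form; unix_create1() uses
                                         * mutex_init() because its lock is
                                         * embedded in an allocated sock */

static void example_critical_section(void)
{
        mutex_lock(&example_lock);      /* may sleep, like down() did */
        /* at most one task in here, as the old readsem enforced */
        mutex_unlock(&example_lock);    /* like up() */
}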
                goto out;
        addr_len = err;
-        down(&u->readsem);
+        mutex_lock(&u->readlock);
        err = -EINVAL;
        if (u->addr)
out_unlock:
        spin_unlock(&unix_table_lock);
out_up:
-        up(&u->readsem);
+        mutex_unlock(&u->readlock);
out:
        return err;
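/*
 * Illustration only, not part of this patch: UNIX_ABSTRACT() near the
 * top of the file picks out addresses in the Linux abstract namespace,
 * where sun_path begins with a NUL byte and no filesystem node exists.
 * A userspace sketch of binding such a name (bind_abstract() is made
 * up; name must be shorter than sizeof(sun.sun_path) - 1):
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_abstract(int fd, const char *name)
{
        struct sockaddr_un sun;
        size_t len = strlen(name);

        memset(&sun, 0, sizeof(sun));   /* leaves sun_path[0] == '\0' */
        sun.sun_family = AF_UNIX;
        memcpy(sun.sun_path + 1, name, len);
        return bind(fd, (struct sockaddr *)&sun,
                    offsetof(struct sockaddr_un, sun_path) + 1 + len);
}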
        if (siocb->scm->fp)
                unix_attach_fds(siocb->scm, skb);
+        unix_get_peersec_dgram(skb);
+
        skb->h.raw = skb->data;
        err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
        if (err)
        while(sent < len)
        {
                /*
-                 * Optimisation for the fact that under 0.01% of X messages typically
-                 * need breaking up.
+                 * Optimisation for the fact that under 0.01% of X
+                 * messages typically need breaking up.
                 */
-                size=len-sent;
+                size = len-sent;
                /* Keep two messages in the pipe so it schedules better */
-                if (size > sk->sk_sndbuf / 2 - 64)
-                        size = sk->sk_sndbuf / 2 - 64;
+                if (size > ((sk->sk_sndbuf >> 1) - 64))
+                        size = (sk->sk_sndbuf >> 1) - 64;
                if (size > SKB_MAX_ALLOC)
                        size = SKB_MAX_ALLOC;
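/*
 * Worked example of the bound above, lifted out for illustration
 * (chunk_size() is made up): with a hypothetical sk_sndbuf of 65536,
 * each skb is capped at 65536/2 - 64 = 32704 bytes, so two maximal
 * messages plus overhead fit in the send buffer, which is the "two
 * messages in the pipe" effect the comment above is after.
 */
static int chunk_size(int sndbuf, int remaining, int skb_max_alloc)
{
        int size = remaining;

        if (size > ((sndbuf >> 1) - 64))        /* sndbuf/2 - 64, as above */
                size = (sndbuf >> 1) - 64;
        if (size > skb_max_alloc)               /* SKB_MAX_ALLOC clamp */
                size = skb_max_alloc;
        return size;
}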
        msg->msg_namelen = 0;
-        down(&u->readsem);
+        mutex_lock(&u->readlock);
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }
        siocb->scm->creds = *UNIXCREDS(skb);
+        unix_set_secdata(siocb->scm, skb);
        if (!(flags & MSG_PEEK))
        {
out_free:
        skb_free_datagram(sk,skb);
out_unlock:
-        up(&u->readsem);
+        mutex_unlock(&u->readlock);
out:
        return err;
}
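/*
 * Illustration only, not part of this patch: the creds copied into
 * siocb->scm above reach userspace as SCM_CREDENTIALS ancillary data
 * when SO_PASSCRED is set, and the new unix_set_secdata() call rides
 * the same scm path. Sketch of the receiving side (dump_creds() is
 * made up):
 */
#define _GNU_SOURCE                     /* for struct ucred, SCM_CREDENTIALS */
#include <stdio.h>
#include <sys/socket.h>

static void dump_creds(struct msghdr *msg)
{
        struct cmsghdr *cmsg;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_CREDENTIALS) {
                        struct ucred *uc = (struct ucred *)CMSG_DATA(cmsg);

                        printf("pid=%d uid=%d gid=%d\n",
                               uc->pid, uc->uid, uc->gid);
                }
        }
}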
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }
-        down(&u->readsem);
+        mutex_lock(&u->readlock);
        do
        {
                        err = -EAGAIN;
                        if (!timeo)
                                break;
-                        up(&u->readsem);
+                        mutex_unlock(&u->readlock);
                        timeo = unix_stream_data_wait(sk, timeo);
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
-                        down(&u->readsem);
+                        mutex_lock(&u->readlock);
                        continue;
                }
                }
        } while (size);
-        up(&u->readsem);
+        mutex_unlock(&u->readlock);
        scm_recv(sock, msg, siocb->scm, flags);
out:
        return copied ? : err;
                mask |= POLLERR;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
+        if (sk->sk_shutdown & RCV_SHUTDOWN)
+                mask |= POLLRDHUP;
        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
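/*
 * Illustration only, not part of this patch: the POLLRDHUP bit added
 * above lets userspace tell a half-closed peer (shutdown(SHUT_WR) on
 * the other end) apart from a full POLLHUP hangup. A sketch, assuming
 * a libc that defines POLLRDHUP under _GNU_SOURCE (peer_half_closed()
 * is made up):
 */
#define _GNU_SOURCE
#include <poll.h>

static int peer_half_closed(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };

        if (poll(&pfd, 1, 0) < 0)       /* zero timeout: non-blocking probe */
                return -1;
        return (pfd.revents & POLLRDHUP) != 0;
}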