struct iovec;
 struct kvec;
 
+/*
+ * 'how' argument to sk_wake_async()/sock_wake_async(), replacing the
+ * previous bare constants.  Enumerator order is significant: the values
+ * must remain 0..3 to match the magic numbers they replace
+ * (0 = I/O event, 1 = data waiter, 2 = write space, 3 = urgent data).
+ */
+enum {
+       SOCK_WAKE_IO,
+       SOCK_WAKE_WAITD,
+       SOCK_WAKE_SPACE,
+       SOCK_WAKE_URG,
+};
+
 extern int          sock_wake_async(struct socket *sk, int how, int band);
 extern int          sock_register(const struct net_proto_family *fam);
 extern void         sock_unregister(int family);
 
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible(sk->sk_sleep);
 
-               sk_wake_async(sk, 2, POLL_OUT);
+               sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
 
        read_unlock(&sk->sk_callback_lock);
 
        read_lock(&sk->sk_callback_lock);
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
-       sk_wake_async(sk,0,POLL_ERR);
+       sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        read_unlock(&sk->sk_callback_lock);
 }
 
        read_lock(&sk->sk_callback_lock);
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
-       sk_wake_async(sk,1,POLL_IN);
+       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        read_unlock(&sk->sk_callback_lock);
 }
 
 
                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
-                       sk_wake_async(sk, 2, POLL_OUT);
+                       sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
 
        read_unlock(&sk->sk_callback_lock);
 {
        if (sk->sk_socket && sk->sk_socket->file)
                if (send_sigurg(&sk->sk_socket->file->f_owner))
-                       sk_wake_async(sk, 3, POLL_PRI);
+                       sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
 }
 
 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible(sk->sk_sleep);
                if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
-                       sock_wake_async(sock, 2, POLL_OUT);
+                       sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
        }
 }
 
 
        dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
        dccp_fin(sk, skb);
        dccp_set_state(sk, DCCP_CLOSED);
-       sk_wake_async(sk, 1, POLL_HUP);
+       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
 }
 
 static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
        dccp_fin(sk, skb);
 
        if (err && !sock_flag(sk, SOCK_DEAD))
-               sk_wake_async(sk, 0, POLL_ERR);
+               sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
 }
 
 
                if (!sock_flag(sk, SOCK_DEAD)) {
                        sk->sk_state_change(sk);
-                       sk_wake_async(sk, 0, POLL_OUT);
+                       sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
                }
 
                if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
                switch (old_state) {
                case DCCP_PARTOPEN:
                        sk->sk_state_change(sk);
-                       sk_wake_async(sk, 0, POLL_OUT);
+                       sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
                        break;
                }
        } else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
 
                wake_up_interruptible(sk->sk_sleep);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
-               sk_wake_async(sk, 2, POLL_OUT);
+               sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 
        read_unlock(&sk->sk_callback_lock);
 }
 
                /* Do not send POLL_HUP for half duplex close. */
                if (sk->sk_shutdown == SHUTDOWN_MASK ||
                    sk->sk_state == TCP_CLOSE)
-                       sk_wake_async(sk, 1, POLL_HUP);
+                       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
                else
-                       sk_wake_async(sk, 1, POLL_IN);
+                       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        }
 }
 
 
                if (!sock_flag(sk, SOCK_DEAD)) {
                        sk->sk_state_change(sk);
-                       sk_wake_async(sk, 0, POLL_OUT);
+                       sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
                }
 
                if (sk->sk_write_pending ||
                                 * are not waked up, because sk->sk_sleep ==
                                 * NULL and sk->sk_socket == NULL.
                                 */
-                               if (sk->sk_socket) {
-                                       sk_wake_async(sk,0,POLL_OUT);
-                               }
+                               if (sk->sk_socket)
+                                       sk_wake_async(sk,
+                                                       SOCK_WAKE_IO, POLL_OUT);
 
                                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                                tp->snd_wnd = ntohs(th->window) <<
 
        if (rxrpc_writable(sk)) {
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible(sk->sk_sleep);
-               sk_wake_async(sk, 2, POLL_OUT);
+               sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        read_unlock(&sk->sk_callback_lock);
 }
 
                         */
                        if (sock->fasync_list &&
                            !(sk->sk_shutdown & SEND_SHUTDOWN))
-                               sock_wake_async(sock, 2, POLL_OUT);
+                               sock_wake_async(sock,
+                                               SOCK_WAKE_SPACE, POLL_OUT);
                }
        }
 }
 
        if (!sock || !sock->fasync_list)
                return -1;
        switch (how) {
-       case 1:
-
+       case SOCK_WAKE_WAITD:
                if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
                        break;
                goto call_kill;
-       case 2:
+       case SOCK_WAKE_SPACE:
                if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
                        break;
                /* fall through */
-       case 0:
+       case SOCK_WAKE_IO:
 call_kill:
                __kill_fasync(sock->fasync_list, SIGIO, band);
                break;
-       case 3:
+       case SOCK_WAKE_URG:
                __kill_fasync(sock->fasync_list, SIGURG, band);
        }
        return 0;
 
        if (unix_writable(sk)) {
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible_sync(sk->sk_sleep);
-               sk_wake_async(sk, 2, POLL_OUT);
+               sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        read_unlock(&sk->sk_callback_lock);
 }
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
                        read_lock(&skpair->sk_callback_lock);
-                       sk_wake_async(skpair,1,POLL_HUP);
+                       sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                        read_unlock(&skpair->sk_callback_lock);
                }
                sock_put(skpair); /* It may now die */
                        other->sk_state_change(other);
                        read_lock(&other->sk_callback_lock);
                        if (peer_mode == SHUTDOWN_MASK)
-                               sk_wake_async(other,1,POLL_HUP);
+                               sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
                        else if (peer_mode & RCV_SHUTDOWN)
-                               sk_wake_async(other,1,POLL_IN);
+                               sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
                        read_unlock(&other->sk_callback_lock);
                }
                if (other)