int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
                  struct x25_address *calling_addr)
 {
-       int called_len, calling_len;
+       unsigned int called_len, calling_len;
        char *called, *calling;
-       int i;
+       unsigned int i;
 
        called_len  = (*p >> 0) & 0x0F;
        calling_len = (*p >> 4) & 0x0F;
 
        }
 
        if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
-               unsigned bytecount = (dte_facs->calling_len % 2) ?
-                                       dte_facs->calling_len / 2 + 1 :
-                                       dte_facs->calling_len / 2;
+               unsigned bytecount = (dte_facs->calling_len + 1) >> 1;
                *p++ = X25_FAC_CALLING_AE;
                *p++ = 1 + bytecount;
                *p++ = dte_facs->calling_len;
 
                                        break;
                                }
                                if (atomic_read(&sk->sk_rmem_alloc) >
-                                   (sk->sk_rcvbuf / 2))
+                                   (sk->sk_rcvbuf >> 1))
                                        x25->condition |= X25_COND_OWN_RX_BUSY;
                        }
                        /*
 
 }
 
 static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
+       __acquires(x25_route_list_lock)
 {
        loff_t l = *pos;
 
 }
 
 /*
  * seq_file ->stop handler for the X.25 route table: drops the
  * read lock that the matching ->start handler annotates as taken
  * (see __acquires(x25_route_list_lock) on x25_seq_route_start).
  * __releases() is a sparse annotation only — no runtime effect.
  */
 static void x25_seq_route_stop(struct seq_file *seq, void *v)
+       __releases(x25_route_list_lock)
 {
        read_unlock_bh(&x25_route_list_lock);
 }
 }
 
 static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
+       __acquires(x25_list_lock)
 {
        loff_t l = *pos;
 
 }
 
 /*
  * seq_file ->stop handler for the X.25 socket list: releases the
  * read lock annotated as acquired in x25_seq_socket_start().
  * __releases() is a sparse lock-context annotation only.
  */
 static void x25_seq_socket_stop(struct seq_file *seq, void *v)
+       __releases(x25_list_lock)
 {
        read_unlock_bh(&x25_list_lock);
 }
 }
 
 static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
+       __acquires(x25_forward_list_lock)
 {
        loff_t l = *pos;
 
 }
 
 /*
  * seq_file ->stop handler for the X.25 forwarding table: releases
  * the read lock annotated as acquired in x25_seq_forward_start().
  * __releases() is a sparse lock-context annotation only.
  */
 static void x25_seq_forward_stop(struct seq_file *seq, void *v)
+       __releases(x25_forward_list_lock)
 {
        read_unlock_bh(&x25_forward_list_lock);
 }
 
 {
        struct x25_sock *x25 = x25_sk(sk);
 
-       if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
+       if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
            (x25->condition & X25_COND_OWN_RX_BUSY)) {
                x25->condition &= ~X25_COND_OWN_RX_BUSY;
                x25->condition &= ~X25_COND_ACK_PENDING;