extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                                     struct inet_bind_bucket *tb);
 
-static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
+static inline int inet_bhashfn(struct net *net,
+               const __u16 lport, const int bhash_size)
 {
        return lport & (bhash_size - 1);
 }
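
The new net argument is unused in the body for now; the bucket index still depends only on the port. Threading the namespace through every caller is presumably preparation for salting the hash with it later, roughly along the lines of the sketch below (net_hash_mix() is an assumption of this sketch, not something introduced by this patch):

	static inline int inet_bhashfn(struct net *net,
			const __u16 lport, const int bhash_size)
	{
		/* Sketch only: mix a per-namespace value into the index so
		 * the same port bound in different namespaces does not
		 * always land in the same chain.  net_hash_mix() is assumed
		 * here, not added by this patch. */
		return (lport + net_hash_mix(net)) & (bhash_size - 1);
	}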
 
                rover = net_random() % remaining + low;
 
                do {
-                       head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
+                       head = &hashinfo->bhash[inet_bhashfn(net, rover,
+                                       hashinfo->bhash_size)];
                        spin_lock(&head->lock);
                        inet_bind_bucket_for_each(tb, node, &head->chain)
                                if (tb->ib_net == net && tb->port == rover)
                 */
                snum = rover;
        } else {
-               head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
+               head = &hashinfo->bhash[inet_bhashfn(net, snum,
+                               hashinfo->bhash_size)];
                spin_lock(&head->lock);
                inet_bind_bucket_for_each(tb, node, &head->chain)
                        if (tb->ib_net == net && tb->port == snum)
 
 static void __inet_put_port(struct sock *sk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-       const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
+       const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num,
+                       hashinfo->bhash_size);
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
        struct inet_bind_bucket *tb;
 
 void __inet_inherit_port(struct sock *sk, struct sock *child)
 {
        struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
-       const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
+       const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num,
+                       table->bhash_size);
        struct inet_bind_hashbucket *head = &table->bhash[bhash];
        struct inet_bind_bucket *tb;
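
Every call site that used to hash only the port now also needs the namespace owning the socket, which is sock_net(sk) for full sockets; __inet_inherit_port() can keep using the listener's namespace because parent and child sockets live in the same one. A hypothetical wrapper (not part of this patch, names are illustrative) makes the pattern used in __inet_put_port() explicit:

	/* Hypothetical helper, not in this patch: bind-hash bucket head for
	 * a socket's bound port, taking the namespace from the socket
	 * itself. */
	static inline struct inet_bind_hashbucket *
	inet_bhash_head(struct inet_hashinfo *hashinfo, const struct sock *sk)
	{
		return &hashinfo->bhash[inet_bhashfn(sock_net(sk),
				inet_sk(sk)->num, hashinfo->bhash_size)];
	}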
 
                local_bh_disable();
                for (i = 1; i <= remaining; i++) {
                        port = low + (i + offset) % remaining;
-                       head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
+                       head = &hinfo->bhash[inet_bhashfn(net, port,
+                                       hinfo->bhash_size)];
                        spin_lock(&head->lock);
 
                        /* Does not bother with rcv_saddr checks,
                goto out;
        }
 
-       head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
+       head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
        tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
 
        write_unlock(lock);
 
        /* Disassociate with bind bucket. */
-       bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
+       bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
+                       hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tb = tw->tw_tb;
        __hlist_del(&tw->tw_bind_node);
           Note, that any socket with inet->num != 0 MUST be bound in
           binding cache, even if it is closed.
         */
-       bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
+       bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
+                       hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        BUG_TRAP(icsk->icsk_bind_hash);
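
For timewait sockets the namespace comes from twsk_net(tw) instead, and the same (namespace, port) pair must be used both when the timewait socket takes over the bind bucket in the hashdance and when it is later torn down, so both hunks above resolve to the same chain. A sketch of the corresponding wrapper for the timewait case (again hypothetical, not part of the patch):

	/* Hypothetical counterpart for timewait sockets: compute the
	 * bind-hash bucket from the timewait socket's own namespace and
	 * bound port. */
	static inline struct inet_bind_hashbucket *
	inet_twsk_bhash_head(struct inet_hashinfo *hashinfo,
			     const struct inet_timewait_sock *tw)
	{
		return &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
				hashinfo->bhash_size)];
	}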