 #define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS_BH(ip_statistics, field, val)
 DECLARE_SNMP_STAT(struct linux_mib, net_statistics);
 #define NET_INC_STATS(net, field)      do { (void)net; SNMP_INC_STATS(net_statistics, field); } while (0)
-#define NET_INC_STATS_BH(field)                SNMP_INC_STATS_BH(net_statistics, field)
+#define NET_INC_STATS_BH(net, field)   do { (void)net; SNMP_INC_STATS_BH(net_statistics, field); } while (0)
 #define NET_INC_STATS_USER(field)      SNMP_INC_STATS_USER(net_statistics, field)
 #define NET_ADD_STATS_BH(field, adnd)  SNMP_ADD_STATS_BH(net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(field, adnd)        SNMP_ADD_STATS_USER(net_statistics, field, adnd)
 
 
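The hunk above changes NET_INC_STATS_BH() to accept a network-namespace argument while the counters are still the global net_statistics array; the do { (void)net; ... } while (0) wrapper evaluates the argument exactly once and discards it, so call sites can be converted ahead of any per-namespace storage without unused-value warnings. A minimal stand-alone sketch of that macro idiom (demo names, not kernel code):

/* Sketch of the conversion pattern: the macro grows a "net" parameter
 * but, while the counters remain global, only evaluates and discards it. */
#include <stdio.h>

static unsigned long demo_statistics[2];        /* stand-in for net_statistics */

#define DEMO_INC_STATS_BH(net, field) \
	do { (void)(net); demo_statistics[field]++; } while (0)

struct demo_net { int id; };                    /* stand-in for struct net */

int main(void)
{
	struct demo_net demo_ns = { 0 };

	DEMO_INC_STATS_BH(&demo_ns, 1);         /* argument evaluated once, then ignored */
	printf("%lu\n", demo_statistics[1]);
	return 0;
}
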
                        while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
                                sk->sk_backlog_rcv(sk, skb1);
-                               NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
                        }
 
                        tp->ucopy.memory = 0;
 
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        if (sk->sk_state == DCCP_CLOSED)
                goto out;
        seq = dccp_hdr_seq(dh);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
-               NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
                BUG_TRAP(!req->sk);
 
                if (seq != dccp_rsk(req)->dreq_iss) {
-                       NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }
                /*
        return newsk;
 
 exit_overflow:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        dst_release(dst);
        return NULL;
 }
 
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        if (sk->sk_state == DCCP_CLOSED)
                goto out;
                BUG_TRAP(req->sk == NULL);
 
                if (seq != dccp_rsk(req)->dreq_iss) {
-                       NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }
 
        return newsk;
 
 out_overflow:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
 
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                icsk->icsk_ack.blocked = 1;
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               jiffies + TCP_DELACK_MIN);
                goto out;
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                dccp_send_ack(sk);
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
 out:
        bh_unlock_sock(sk);
 
        if (ip_route_output_key(net, &rt, &fl) < 0)
                return 1;
        if (rt->u.dst.dev != dev) {
-               NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
+               NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
                flag = 1;
        }
        ip_rt_put(rt);
 
 
        if (twp) {
                *twp = tw;
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+               NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                inet_twsk_deschedule(tw, death_row);
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+               NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
                inet_twsk_put(tw);
        }
 
                ;
        *mssp = msstab[mssind] + 1;
 
-       NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
        return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
                                     th->source, th->dest, ntohl(th->seq),
 
        if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
            (mss = cookie_check(skb, cookie)) == 0) {
-               NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
                goto out;
        }
 
-       NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
 
                if (tp->linger2 < 0) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPABORTONLINGER);
                } else {
                        const int tmo = tcp_fin_time(sk);
 
                                       "sockets\n");
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPABORTONMEMORY);
                }
        }
 
 
                else
                        mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
                printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
                       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
                                tp->lost_out += tcp_skb_pcount(skb);
                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                        }
-                       NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
                        if (before(ack_seq, new_low_seq))
                                new_low_seq = ack_seq;
        if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
                dup_sack = 1;
                tcp_dsack_seen(tp);
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
        } else if (num_sacks > 1) {
                u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
                u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
                    !before(start_seq_0, start_seq_1)) {
                        dup_sack = 1;
                        tcp_dsack_seen(tp);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPDSACKOFORECV);
                }
        }
 
                                mib_idx = LINUX_MIB_TCPSACKDISCARD;
                        }
 
-                       NET_INC_STATS_BH(mib_idx);
+                       NET_INC_STATS_BH(sock_net(sk), mib_idx);
                        if (i == 0)
                                first_sack_index = -1;
                        continue;
 {
        if (flag & FLAG_SACK_RENEGING) {
                struct inet_connection_sock *icsk = inet_csk(sk);
-               NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
                icsk->icsk_retransmits++;
                else
                        mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
                tp->undo_marker = 0;
        }
        if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
                DBGUNDO(sk, "D-SACK");
                tcp_undo_cwr(sk, 1);
                tp->undo_marker = 0;
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
        }
 }
 
 
                DBGUNDO(sk, "Hoe");
                tcp_undo_cwr(sk, 0);
-               NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
                /* So... Do not make Hoe's retransmit yet.
                 * If the first packet was delayed, the rest
                DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
                tcp_undo_cwr(sk, 1);
-               NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
                inet_csk(sk)->icsk_retransmits = 0;
                tp->undo_marker = 0;
                if (tcp_is_sack(tp))
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
                tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-               NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
        }
 
        /* D. Check consistency of the current state. */
                else
                        mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
                tp->high_seq = tp->snd_nxt;
                tp->prior_ssthresh = 0;
                }
                tp->frto_counter = 0;
                tp->undo_marker = 0;
-               NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
        }
        return 0;
 }
 
                tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-               NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
        } else {
                if (ack_seq != TCP_SKB_CB(skb)->end_seq)
                        flag |= FLAG_DATA;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
                flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
                else
                        mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;
 
        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
            before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk);
 
                if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 
        if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
                /* A retransmit, 2nd most common case.  Force an immediate ack. */
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
                        struct sk_buff *next = skb->next;
                        __skb_unlink(skb, list);
                        __kfree_skb(skb);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
                        skb = next;
                        continue;
                }
                                struct sk_buff *next = skb->next;
                                __skb_unlink(skb, list);
                                __kfree_skb(skb);
-                               NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
                                if (skb == tail ||
                                    tcp_hdr(skb)->syn ||
        int res = 0;
 
        if (!skb_queue_empty(&tp->out_of_order_queue)) {
-               NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
                __skb_queue_purge(&tp->out_of_order_queue);
 
                /* Reset SACK state.  A conforming SACK implementation will
 
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-       NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
         * drop receive data on the floor.  It will get retransmitted
         * and hopefully then we'll have sufficient space.
         */
-       NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
        /* Massive buffer overcommit. */
        tp->pred_flags = 0;
 
                                        __skb_pull(skb, tcp_header_len);
                                        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+                                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
                                }
                                if (copied_early)
                                        tcp_cleanup_rbuf(sk, skb->len);
                                if ((int)skb->truesize > sk->sk_forward_alloc)
                                        goto step5;
 
-                               NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
                                /* Bulk data transfer: receiver */
                                __skb_pull(skb, tcp_header_len);
        if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                        tcp_send_dupack(sk, skb);
                        goto discard;
                }
 
        if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
                tcp_reset(sk);
                return 1;
        }
                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
                             tcp_time_stamp)) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
                        goto reset_and_undo;
                }
 
        if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                        tcp_send_dupack(sk, skb);
                        goto discard;
                }
         *      Check for a SYN in window.
         */
        if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
                tcp_reset(sk);
                return 1;
        }
                                            (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                                             after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
                                                tcp_done(sk);
-                                               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+                                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                                                return 1;
                                        }
 
                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                            after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-                               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                                tcp_reset(sk);
                                return 1;
                        }
 
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        if (sk->sk_state == TCP_CLOSE)
                goto out;
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
                BUG_TRAP(!req->sk);
 
                if (seq != tcp_rsk(req)->snt_isn) {
-                       NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }
 
                        if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
                                                        TCP_PAWS_WINDOW) {
-                               NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
                                goto drop_and_release;
                        }
                }
        return newsk;
 
 exit_overflow:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        dst_release(dst);
        return NULL;
 }
 
        }
 
        if (paws_reject)
-               NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
 
        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_ack(skb, req);
                if (paws_reject)
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }
 
                }
 
        embryonic_reset:
-               NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_reset(sk, skb);
 
 
                                                mib_idx = LINUX_MIB_TCPFASTRETRANS;
                                        else
                                                mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
-                                       NET_INC_STATS_BH(mib_idx);
+                                       NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
                                        if (skb == tcp_write_queue_head(sk))
                                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  inet_csk(sk)->icsk_rto,
                                                  TCP_RTO_MAX);
 
-               NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
        }
 }
 
 
        sk->sk_error_report(sk);
 
        tcp_done(sk);
-       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 
 /* Do not allow orphaned sockets to eat all our resources.
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
-               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }
        return 0;
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                icsk->icsk_ack.blocked = 1;
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
                goto out_unlock;
        }
        if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                struct sk_buff *skb;
 
-               NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
                while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk->sk_backlog_rcv(sk, skb);
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
                tcp_send_ack(sk);
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
        TCP_CHECK_TIMER(sk);
 
                } else {
                        mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
        }
 
        if (tcp_use_frto(sk)) {
 
 
        if (twp != NULL) {
                *twp = tw;
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw != NULL) {
                /* Silly. Should hash-dance instead... */
                inet_twsk_deschedule(tw, death_row);
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+               NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
 
                inet_twsk_put(tw);
        }
 
                ;
        *mssp = msstab[mssind] + 1;
 
-       NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
        return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
                                     th->dest, ntohl(th->seq),
 
        if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
                (mss = cookie_check(skb, cookie)) == 0) {
-               NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
                goto out;
        }
 
-       NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
 
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        if (sk->sk_state == TCP_CLOSE)
                goto out;
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
                BUG_TRAP(req->sk == NULL);
 
                if (seq != tcp_rsk(req)->snt_isn) {
-                       NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+                       NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }
 
        return newsk;
 
 out_overflow:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out:
-       NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
 
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        *app = asoc;
        *tpp = transport;
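
Taken together, the call-site hunks show where the new first argument comes from: a struct net * already in scope (the ICMP error handlers), sock_net(sk) when a full socket is available, twsk_net(tw) for timewait sockets, and &init_net for SCTP, which still uses the initial namespace here. A stand-alone sketch of that selection, using demo types and a printing macro rather than the kernel's SNMP machinery:

/* Hedged sketch (demo types, not the kernel API) of how call sites pick
 * the namespace argument: explicit pointer, socket, timewait socket, or
 * the initial namespace as a fallback. */
#include <stdio.h>

struct demo_net  { const char *name; };
struct demo_sock { struct demo_net *net; };
struct demo_tw   { struct demo_net *net; };

static struct demo_net demo_init_net = { "init_net" };

/* stand-ins for sock_net() / twsk_net() */
static struct demo_net *demo_sock_net(const struct demo_sock *sk) { return sk->net; }
static struct demo_net *demo_twsk_net(const struct demo_tw *tw)   { return tw->net; }

#define DEMO_INC_STATS_BH(net, field) \
	printf("%s: %s++\n", (net)->name, field)

int main(void)
{
	struct demo_net  ns = { "netns0" };
	struct demo_sock sk = { &ns };
	struct demo_tw   tw = { &ns };

	DEMO_INC_STATS_BH(&ns,                "LINUX_MIB_OUTOFWINDOWICMPS");
	DEMO_INC_STATS_BH(demo_sock_net(&sk), "LINUX_MIB_LISTENDROPS");
	DEMO_INC_STATS_BH(demo_twsk_net(&tw), "LINUX_MIB_TIMEWAITRECYCLED");
	DEMO_INC_STATS_BH(&demo_init_net,     "LINUX_MIB_LOCKDROPPEDICMPS");
	return 0;
}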