.ndo_set_multicast_list = cp_set_rx_mode,
        .ndo_get_stats          = cp_get_stats,
        .ndo_do_ioctl           = cp_ioctl,
+       .ndo_start_xmit         = cp_start_xmit,
        .ndo_tx_timeout         = cp_tx_timeout,
 #if CP_VLAN_TAG_USED
        .ndo_vlan_rx_register   = cp_vlan_rx_register,
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        dev->netdev_ops = &cp_netdev_ops;
-       dev->hard_start_xmit = cp_start_xmit;
        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
        dev->ethtool_ops = &cp_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        .ndo_stop               = rtl8139_close,
        .ndo_get_stats          = rtl8139_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
+       .ndo_start_xmit         = rtl8139_start_xmit,
        .ndo_set_multicast_list = rtl8139_set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_tx_timeout         = rtl8139_tx_timeout,
        dev->netdev_ops = &rtl8139_netdev_ops;
        dev->ethtool_ops = &rtl8139_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       dev->hard_start_xmit = rtl8139_start_xmit;
        netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);
 
        /* note: the hardware is not capable of sg/csum/highdma, however
 
        .ndo_stop               = ace_close,
        .ndo_tx_timeout         = ace_watchdog,
        .ndo_get_stats          = ace_get_stats,
+       .ndo_start_xmit         = ace_start_xmit,
        .ndo_set_multicast_list = ace_set_multicast_list,
        .ndo_set_mac_address    = ace_set_mac_addr,
        .ndo_change_mtu         = ace_change_mtu,
+#if ACENIC_DO_VLAN
        .ndo_vlan_rx_register   = ace_vlan_rx_register,
+#endif
 };
 
 static int __devinit acenic_probe_one(struct pci_dev *pdev,
        dev->watchdog_timeo = 5*HZ;
 
        dev->netdev_ops = &ace_netdev_ops;
-       dev->hard_start_xmit = &ace_start_xmit;
        SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
 
        /* we only display this string ONCE */
 
 static const struct net_device_ops atl1e_netdev_ops = {
        .ndo_open               = atl1e_open,
        .ndo_stop               = atl1e_close,
+       .ndo_start_xmit         = atl1e_xmit_frame,
        .ndo_get_stats          = atl1e_get_stats,
        .ndo_set_multicast_list = atl1e_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
 
        netdev->irq  = pdev->irq;
        netdev->netdev_ops = &atl1e_netdev_ops;
-       netdev->hard_start_xmit = atl1e_xmit_frame,
+
        netdev->watchdog_timeo = AT_TX_WATCHDOG;
        atl1e_set_ethtool_ops(netdev);
 
 
 static const struct net_device_ops atl1_netdev_ops = {
        .ndo_open               = atl1_open,
        .ndo_stop               = atl1_close,
+       .ndo_start_xmit         = atl1_xmit_frame,
        .ndo_set_multicast_list = atlx_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = atl1_set_mac,
        .ndo_change_mtu         = atl1_change_mtu,
        .ndo_do_ioctl           = atlx_ioctl,
-       .ndo_tx_timeout = atlx_tx_timeout,
+       .ndo_tx_timeout         = atlx_tx_timeout,
        .ndo_vlan_rx_register   = atlx_vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = atl1_poll_controller,
        adapter->mii.reg_num_mask = 0x1f;
 
        netdev->netdev_ops = &atl1_netdev_ops;
-       netdev->hard_start_xmit = &atl1_xmit_frame;
        netdev->watchdog_timeo = 5 * HZ;
 
        netdev->ethtool_ops = &atl1_ethtool_ops;
 
 static const struct net_device_ops atl2_netdev_ops = {
        .ndo_open               = atl2_open,
        .ndo_stop               = atl2_close,
+       .ndo_start_xmit         = atl2_xmit_frame,
        .ndo_set_multicast_list = atl2_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = atl2_set_mac,
 
        atl2_setup_pcicmd(pdev);
 
-       netdev->hard_start_xmit = &atl2_xmit_frame;
        netdev->netdev_ops = &atl2_netdev_ops;
        atl2_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
 
        return 0;
 }
 
-
 static void bond_setup_by_slave(struct net_device *bond_dev,
                                struct net_device *slave_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       bond_dev->neigh_setup           = slave_dev->neigh_setup;
-       bond_dev->header_ops            = slave_dev->header_ops;
+       bond_dev->header_ops        = slave_dev->header_ops;
 
        bond_dev->type              = slave_dev->type;
        bond_dev->hard_header_len   = slave_dev->hard_header_len;
        read_unlock(&bond->lock);
 }
 
+/*
+ * ndo_neigh_setup hook for the bonding master: delegate neighbour
+ * parameter setup to the first slave's driver when that driver provides
+ * an ndo_neigh_setup of its own; otherwise report success unchanged.
+ */
+static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
+{
+       struct bonding *bond = netdev_priv(dev);
+       struct slave *slave = bond->first_slave;
+
+       if (slave) {
+               const struct net_device_ops *slave_ops
+                       = slave->dev->netdev_ops;
+               if (slave_ops->ndo_neigh_setup)
+                       return slave_ops->ndo_neigh_setup(dev, parms);
+       }
+       /* No slave yet, or slave driver has no hook: nothing to adjust. */
+       return 0;
+}
+
 /*
  * Change the MTU of all of a master's slaves to match the master
  */
        }
 }
 
+/*
+ * Single ndo_start_xmit entry point for the bond device: dispatch the
+ * skb to the transmit routine for the currently configured bonding mode
+ * (replaces the old scheme of swapping dev->hard_start_xmit per mode).
+ */
+static int bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       const struct bonding *bond = netdev_priv(dev);
+
+       switch (bond->params.mode) {
+       case BOND_MODE_ROUNDROBIN:
+               return bond_xmit_roundrobin(skb, dev);
+       case BOND_MODE_ACTIVEBACKUP:
+               return bond_xmit_activebackup(skb, dev);
+       case BOND_MODE_XOR:
+               return bond_xmit_xor(skb, dev);
+       case BOND_MODE_BROADCAST:
+               return bond_xmit_broadcast(skb, dev);
+       case BOND_MODE_8023AD:
+               return bond_3ad_xmit_xor(skb, dev);
+       case BOND_MODE_ALB:
+       case BOND_MODE_TLB:
+               return bond_alb_xmit(skb, dev);
+       default:
+               /* Should never happen, mode already checked */
+               printk(KERN_ERR DRV_NAME ": %s: Error: Unknown bonding mode %d\n",
+                    dev->name, bond->params.mode);
+               WARN_ON_ONCE(1);
+               /* Drop the packet rather than leak it; still report OK. */
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+}
+
+
 /*
  * set bond mode specific net device operations
  */
 
        switch (mode) {
        case BOND_MODE_ROUNDROBIN:
-               bond_dev->hard_start_xmit = bond_xmit_roundrobin;
                break;
        case BOND_MODE_ACTIVEBACKUP:
-               bond_dev->hard_start_xmit = bond_xmit_activebackup;
                break;
        case BOND_MODE_XOR:
-               bond_dev->hard_start_xmit = bond_xmit_xor;
                bond_set_xmit_hash_policy(bond);
                break;
        case BOND_MODE_BROADCAST:
-               bond_dev->hard_start_xmit = bond_xmit_broadcast;
                break;
        case BOND_MODE_8023AD:
                bond_set_master_3ad_flags(bond);
-               bond_dev->hard_start_xmit = bond_3ad_xmit_xor;
                bond_set_xmit_hash_policy(bond);
                break;
        case BOND_MODE_ALB:
                bond_set_master_alb_flags(bond);
                /* FALLTHRU */
        case BOND_MODE_TLB:
-               bond_dev->hard_start_xmit = bond_alb_xmit;
                break;
        default:
                /* Should never happen, mode already checked */
 static const struct net_device_ops bond_netdev_ops = {
        .ndo_open               = bond_open,
        .ndo_stop               = bond_close,
+       .ndo_start_xmit         = bond_start_xmit,
        .ndo_get_stats          = bond_get_stats,
        .ndo_do_ioctl           = bond_do_ioctl,
        .ndo_set_multicast_list = bond_set_multicast_list,
        .ndo_change_mtu         = bond_change_mtu,
-       .ndo_validate_addr      = NULL,
        .ndo_set_mac_address    = bond_set_mac_address,
+       .ndo_neigh_setup        = bond_neigh_setup,
        .ndo_vlan_rx_register   = bond_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
 
 }
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-static void vlan_rx_register(struct net_device *dev,
+static void t1_vlan_rx_register(struct net_device *dev,
                                   struct vlan_group *grp)
 {
        struct adapter *adapter = dev->ml_priv;
 static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
+       .ndo_start_xmit         = t1_start_xmit,
        .ndo_get_stats          = t1_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = t1_set_rxmode,
        .ndo_change_mtu         = t1_change_mtu,
        .ndo_set_mac_address    = t1_set_mac_addr,
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-       .ndo_vlan_rx_register   = vlan_rx_register,
+       .ndo_vlan_rx_register   = t1_vlan_rx_register,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = t1_netpoll,
                }
 
                netdev->netdev_ops = &cxgb_netdev_ops;
-               netdev->hard_start_xmit = t1_start_xmit;
                netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
 
 
 
                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->netdev_ops = &cxgb_netdev_ops;
-               netdev->hard_start_xmit = t3_eth_xmit;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }
 
 
 static const struct net_device_ops e100_netdev_ops = {
        .ndo_open               = e100_open,
        .ndo_stop               = e100_close,
+       .ndo_start_xmit         = e100_xmit_frame,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = e100_set_multicast_list,
        .ndo_set_mac_address    = e100_set_mac_address,
        }
 
        netdev->netdev_ops = &e100_netdev_ops;
-       netdev->hard_start_xmit = e100_xmit_frame;
        SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
        netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
 static const struct net_device_ops e1000_netdev_ops = {
        .ndo_open               = e1000_open,
        .ndo_stop               = e1000_close,
+       .ndo_start_xmit         = e1000_xmit_frame,
        .ndo_get_stats          = e1000_get_stats,
        .ndo_set_rx_mode        = e1000_set_rx_mode,
        .ndo_set_mac_address    = e1000_set_mac,
        }
 
        netdev->netdev_ops = &e1000_netdev_ops;
-       netdev->hard_start_xmit = &e1000_xmit_frame;
        e1000_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
 
 static const struct net_device_ops e1000e_netdev_ops = {
        .ndo_open               = e1000_open,
        .ndo_stop               = e1000_close,
+       .ndo_start_xmit         = e1000_xmit_frame,
        .ndo_get_stats          = e1000_get_stats,
        .ndo_set_multicast_list = e1000_set_multi,
        .ndo_set_mac_address    = e1000_set_mac,
 
        /* construct the net_device struct */
        netdev->netdev_ops              = &e1000e_netdev_ops;
-       netdev->hard_start_xmit         = &e1000_xmit_frame;
        e1000e_set_ethtool_ops(netdev);
        netdev->watchdog_timeo          = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
 
 static const struct net_device_ops enic_netdev_ops = {
        .ndo_open               = enic_open,
        .ndo_stop               = enic_stop,
+       .ndo_start_xmit         = enic_hard_start_xmit,
        .ndo_get_stats          = enic_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = enic_set_multicast_list,
        }
 
        netdev->netdev_ops = &enic_netdev_ops;
-       netdev->hard_start_xmit = enic_hard_start_xmit;
        netdev->watchdog_timeo = 2 * HZ;
        netdev->ethtool_ops = &enic_ethtool_ops;
 
 
        .ndo_open               = nv_open,
        .ndo_stop               = nv_close,
        .ndo_get_stats          = nv_get_stats,
+       .ndo_start_xmit         = nv_start_xmit,
+       .ndo_tx_timeout         = nv_tx_timeout,
+       .ndo_change_mtu         = nv_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = nv_set_mac_address,
+       .ndo_set_multicast_list = nv_set_multicast,
+       .ndo_vlan_rx_register   = nv_vlan_rx_register,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = nv_poll_controller,
+#endif
+};
+
+static const struct net_device_ops nv_netdev_ops_optimized = {
+       .ndo_open               = nv_open,
+       .ndo_stop               = nv_close,
+       .ndo_get_stats          = nv_get_stats,
+       .ndo_start_xmit         = nv_start_xmit_optimized,
        .ndo_tx_timeout         = nv_tx_timeout,
        .ndo_change_mtu         = nv_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
                goto out_freering;
 
        if (!nv_optimized(np))
-               dev->hard_start_xmit = nv_start_xmit;
+               dev->netdev_ops = &nv_netdev_ops;
        else
-               dev->hard_start_xmit = nv_start_xmit_optimized;
+               dev->netdev_ops = &nv_netdev_ops_optimized;
 
-       dev->netdev_ops = &nv_netdev_ops;
 #ifdef CONFIG_FORCEDETH_NAPI
        netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
 #endif
 
 }
 
 static const struct net_device_ops ifb_netdev_ops = {
-       .ndo_validate_addr = eth_validate_addr,
        .ndo_open       = ifb_open,
        .ndo_stop       = ifb_close,
+       .ndo_start_xmit = ifb_xmit,
+       .ndo_validate_addr = eth_validate_addr,
 };
 
 static void ifb_setup(struct net_device *dev)
 {
        /* Initialize the device structure. */
-       dev->hard_start_xmit = ifb_xmit;
        dev->destructor = free_netdev;
        dev->netdev_ops = &ifb_netdev_ops;
 
 
 static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
+       .ndo_start_xmit         = igb_xmit_frame_adv,
        .ndo_get_stats          = igb_get_stats,
        .ndo_set_multicast_list = igb_set_multi,
        .ndo_set_mac_address    = igb_set_mac,
        netdev->netdev_ops = &igb_netdev_ops;
        igb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
-       netdev->hard_start_xmit = &igb_xmit_frame_adv;
 
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
 
 static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_open               = ixgb_open,
        .ndo_stop               = ixgb_close,
+       .ndo_start_xmit         = ixgb_xmit_frame,
        .ndo_get_stats          = ixgb_get_stats,
        .ndo_set_multicast_list = ixgb_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
        }
 
        netdev->netdev_ops = &ixgb_netdev_ops;
-       netdev->hard_start_xmit = &ixgb_xmit_frame;
        ixgb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
 
 static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
+       .ndo_start_xmit         = ixgbe_xmit_frame,
        .ndo_get_stats          = ixgbe_get_stats,
        .ndo_set_multicast_list = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        }
 
        netdev->netdev_ops = &ixgbe_netdev_ops;
-       netdev->hard_start_xmit = &ixgbe_xmit_frame;
        ixgbe_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        strcpy(netdev->name, pci_name(pdev));
 
 
 static const struct net_device_ops loopback_ops = {
        .ndo_init      = loopback_dev_init,
+       .ndo_start_xmit= loopback_xmit,
        .ndo_get_stats = loopback_get_stats,
 };
 
 static void loopback_setup(struct net_device *dev)
 {
        dev->mtu                = (16 * 1024) + 20 + 20 + 12;
-       dev->hard_start_xmit    = loopback_xmit;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
        dev->tx_queue_len       = 0;
 
        return NULL;
 }
 
-static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        const struct macvlan_dev *vlan = netdev_priv(dev);
        unsigned int len = skb->len;
        .ndo_init               = macvlan_init,
        .ndo_open               = macvlan_open,
        .ndo_stop               = macvlan_stop,
+       .ndo_start_xmit         = macvlan_start_xmit,
        .ndo_change_mtu         = macvlan_change_mtu,
        .ndo_change_rx_flags    = macvlan_change_rx_flags,
        .ndo_set_mac_address    = macvlan_set_mac_address,
        ether_setup(dev);
 
        dev->netdev_ops         = &macvlan_netdev_ops;
-       dev->hard_start_xmit    = macvlan_hard_start_xmit;
        dev->destructor         = free_netdev;
        dev->header_ops         = &macvlan_hard_header_ops,
        dev->ethtool_ops        = &macvlan_ethtool_ops;
 
 static const struct net_device_ops niu_netdev_ops = {
        .ndo_open               = niu_open,
        .ndo_stop               = niu_close,
+       .ndo_start_xmit         = niu_start_xmit,
        .ndo_get_stats          = niu_get_stats,
        .ndo_set_multicast_list = niu_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
 static void __devinit niu_assign_netdev_ops(struct net_device *dev)
 {
        dev->netdev_ops = &niu_netdev_ops;
-       dev->hard_start_xmit = niu_start_xmit;
        dev->ethtool_ops = &niu_ethtool_ops;
        dev->watchdog_timeo = NIU_TX_TIMEOUT;
 }
 
 }
 
 static const struct net_device_ops ppp_netdev_ops = {
-       .ndo_do_ioctl = ppp_net_ioctl,
+       .ndo_start_xmit = ppp_start_xmit,
+       .ndo_do_ioctl   = ppp_net_ioctl,
 };
 
 static void ppp_setup(struct net_device *dev)
        skb_queue_head_init(&ppp->mrq);
 #endif /* CONFIG_PPP_MULTILINK */
 
-       dev->hard_start_xmit = ppp_start_xmit;
-
        ret = -EEXIST;
        mutex_lock(&all_ppp_mutex);
        if (unit < 0)
 
        .ndo_open               = rtl8169_open,
        .ndo_stop               = rtl8169_close,
        .ndo_get_stats          = rtl8169_get_stats,
+       .ndo_start_xmit         = rtl8169_start_xmit,
        .ndo_tx_timeout         = rtl8169_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = rtl8169_change_mtu,
                dev->dev_addr[i] = RTL_R8(MAC0 + i);
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       dev->hard_start_xmit = rtl8169_start_xmit;
        SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
        dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
        dev->irq = pdev->irq;
 
 static const struct net_device_ops skge_netdev_ops = {
        .ndo_open               = skge_up,
        .ndo_stop               = skge_down,
+       .ndo_start_xmit         = skge_xmit_frame,
        .ndo_do_ioctl           = skge_ioctl,
        .ndo_get_stats          = skge_get_stats,
        .ndo_tx_timeout         = skge_tx_timeout,
        }
 
        SET_NETDEV_DEV(dev, &hw->pdev->dev);
-       dev->hard_start_xmit = skge_xmit_frame;
        dev->netdev_ops = &skge_netdev_ops;
        dev->ethtool_ops = &skge_ethtool_ops;
        dev->watchdog_timeo = TX_WATCHDOG;
 
   {
        .ndo_open               = sky2_up,
        .ndo_stop               = sky2_down,
+       .ndo_start_xmit         = sky2_xmit_frame,
        .ndo_do_ioctl           = sky2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = sky2_set_mac_address,
   {
        .ndo_open               = sky2_up,
        .ndo_stop               = sky2_down,
+       .ndo_start_xmit         = sky2_xmit_frame,
        .ndo_do_ioctl           = sky2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = sky2_set_mac_address,
 
        SET_NETDEV_DEV(dev, &hw->pdev->dev);
        dev->irq = hw->pdev->irq;
-       dev->hard_start_xmit = sky2_xmit_frame;
        SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
        dev->watchdog_timeo = TX_WATCHDOG;
        dev->netdev_ops = &sky2_netdev_ops[port];
 
        else
                tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
 
-       /* All chips before 5787 can get confused if TX buffers
-        * straddle the 4GB address boundary in some cases.
-        */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
-               tp->dev->hard_start_xmit = tg3_start_xmit;
-       else
-               tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
-
        tp->rx_offset = 2;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
 static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
+       .ndo_start_xmit         = tg3_start_xmit,
+       .ndo_get_stats          = tg3_get_stats,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_multicast_list = tg3_set_rx_mode,
+       .ndo_set_mac_address    = tg3_set_mac_addr,
+       .ndo_do_ioctl           = tg3_ioctl,
+       .ndo_tx_timeout         = tg3_tx_timeout,
+       .ndo_change_mtu         = tg3_change_mtu,
+#if TG3_VLAN_TAG_USED
+       .ndo_vlan_rx_register   = tg3_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = tg3_poll_controller,
+#endif
+};
+
+static const struct net_device_ops tg3_netdev_ops_dma_bug = {
+       .ndo_open               = tg3_open,
+       .ndo_stop               = tg3_close,
+       .ndo_start_xmit         = tg3_start_xmit_dma_bug,
        .ndo_get_stats          = tg3_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = tg3_set_rx_mode,
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
        tp->tx_pending = TG3_DEF_TX_RING_PENDING;
 
-       dev->netdev_ops = &tg3_netdev_ops;
        netif_napi_add(dev, &tp->napi, tg3_poll, 64);
        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
                goto err_out_iounmap;
        }
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+               dev->netdev_ops = &tg3_netdev_ops;
+       else
+               dev->netdev_ops = &tg3_netdev_ops_dma_bug;
+
+
        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
 
 static const struct net_device_ops tun_netdev_ops = {
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
+       .ndo_start_xmit         = tun_net_xmit,
        .ndo_change_mtu         = tun_net_change_mtu,
-
 };
 
 static const struct net_device_ops tap_netdev_ops = {
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
+       .ndo_start_xmit         = tun_net_xmit,
        .ndo_change_mtu         = tun_net_change_mtu,
        .ndo_set_multicast_list = tun_net_mclist,
        .ndo_set_mac_address    = eth_mac_addr,
        tun->owner = -1;
        tun->group = -1;
 
-       dev->hard_start_xmit = tun_net_xmit;
        dev->ethtool_ops = &tun_ethtool_ops;
        dev->destructor = free_netdev;
        dev->features |= NETIF_F_NETNS_LOCAL;
 
 static const struct net_device_ops veth_netdev_ops = {
        .ndo_init       = veth_dev_init,
        .ndo_open       = veth_open,
+       .ndo_start_xmit = veth_xmit,
        .ndo_get_stats  = veth_get_stats,
 };
 
        ether_setup(dev);
 
        dev->netdev_ops = &veth_netdev_ops;
-       dev->hard_start_xmit = veth_xmit;
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->destructor = veth_dev_free;
 
 static const struct net_device_ops velocity_netdev_ops = {
        .ndo_open               = velocity_open,
        .ndo_stop               = velocity_close,
+       .ndo_start_xmit         = velocity_xmit,
        .ndo_get_stats          = velocity_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = velocity_set_multi,
        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
 
        dev->irq = pdev->irq;
-       dev->hard_start_xmit = velocity_xmit;
        dev->netdev_ops = &velocity_netdev_ops;
        dev->ethtool_ops = &velocity_ethtool_ops;
 
 
 
 /*
  * This structure defines the management hooks for network devices.
- * The following hooks can bed defined and are optonal (can be null)
- * unless otherwise noted.
+ * The following hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer.
  *
  * int (*ndo_init)(struct net_device *dev);
  *     This function is called once when network device is registered.
  *     This function is called when network device transistions to the down
  *     state.
  *
+ * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
+ *     Called when a packet needs to be transmitted.
+ *     Must return NETDEV_TX_OK, NETDEV_TX_BUSY, or NETDEV_TX_LOCKED.
+ *     Required; can not be NULL.
+ *
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ *     Called to decide which transmit queue to use when the device
+ *     supports multiple transmit queues.
+ *
  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  *     This function is called to allow device receiver to make
  *     changes to configuration when multicast or promiscious is enabled.
  *     of a device. If not defined, any request to change MTU will
  *     will return an error.
  *
- * void (*ndo_tx_timeout) (struct net_device *dev);
+ * void (*ndo_tx_timeout)(struct net_device *dev);
  *     Callback uses when the transmitter has not made any progress
  *     for dev->watchdog ticks.
  *
        void                    (*ndo_uninit)(struct net_device *dev);
        int                     (*ndo_open)(struct net_device *dev);
        int                     (*ndo_stop)(struct net_device *dev);
+       int                     (*ndo_start_xmit) (struct sk_buff *skb,
+                                                  struct net_device *dev);
+       u16                     (*ndo_select_queue)(struct net_device *dev,
+                                                   struct sk_buff *skb);
 #define HAVE_CHANGE_RX_FLAGS
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
        int                     (*ndo_set_config)(struct net_device *dev,
                                                  struct ifmap *map);
 #define HAVE_CHANGE_MTU
-       int                     (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
-
+       int                     (*ndo_change_mtu)(struct net_device *dev,
+                                                 int new_mtu);
+       int                     (*ndo_neigh_setup)(struct net_device *dev,
+                                                  struct neigh_parms *);
 #define HAVE_TX_TIMEOUT
        void                    (*ndo_tx_timeout) (struct net_device *dev);
 
        /* Number of TX queues currently active in device  */
        unsigned int            real_num_tx_queues;
 
-       /* Map buffer to appropriate transmit queue */
-       u16                     (*select_queue)(struct net_device *dev,
-                                               struct sk_buff *skb);
-
        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
        spinlock_t              tx_global_lock;
 /*
  * One part is mostly used on xmit path (device)
  */
        void                    *priv;  /* pointer to private data      */
-       int                     (*hard_start_xmit) (struct sk_buff *skb,
-                                                   struct net_device *dev);
        /* These may be needed for future network-power-down code. */
        unsigned long           trans_start;    /* Time (in jiffies) of last Tx */
 
        /* Called from unregister, can be used to call free_netdev */
        void (*destructor)(struct net_device *dev);
 
-       int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
-
 #ifdef CONFIG_NETPOLL
        struct netpoll_info     *npinfo;
 #endif
                void                    (*uninit)(struct net_device *dev);
                int                     (*open)(struct net_device *dev);
                int                     (*stop)(struct net_device *dev);
+               int                     (*hard_start_xmit) (struct sk_buff *skb,
+                                                           struct net_device *dev);
+               u16                     (*select_queue)(struct net_device *dev,
+                                                       struct sk_buff *skb);
                void                    (*change_rx_flags)(struct net_device *dev,
                                                           int flags);
                void                    (*set_rx_mode)(struct net_device *dev);
                int                     (*set_config)(struct net_device *dev,
                                                      struct ifmap *map);
                int                     (*change_mtu)(struct net_device *dev, int new_mtu);
+               int                     (*neigh_setup)(struct net_device *dev,
+                                                      struct neigh_parms *);
                void                    (*tx_timeout) (struct net_device *dev);
                struct net_device_stats* (*get_stats)(struct net_device *dev);
                void                    (*vlan_rx_register)(struct net_device *dev,
 
 static const struct net_device_ops br_netdev_ops = {
        .ndo_open                = br_dev_open,
        .ndo_stop                = br_dev_stop,
-       .ndo_set_mac_address = br_set_mac_address,
-       .ndo_set_multicast_list = br_dev_set_multicast_list,
-       .ndo_change_mtu  = br_change_mtu,
-       .ndo_do_ioctl   = br_dev_ioctl,
+       .ndo_start_xmit          = br_dev_xmit,
+       .ndo_set_mac_address     = br_set_mac_address,
+       .ndo_set_multicast_list  = br_dev_set_multicast_list,
+       .ndo_change_mtu          = br_change_mtu,
+       .ndo_do_ioctl            = br_dev_ioctl,
 };
 
 void br_dev_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->netdev_ops = &br_netdev_ops;
-       dev->hard_start_xmit = br_dev_xmit;
        dev->destructor = free_netdev;
        SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
        dev->tx_queue_len = 0;
 
        if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
                return -EINVAL;
 
-       if (dev->hard_start_xmit == br_dev_xmit)
+       if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
                return -ELOOP;
 
        if (dev->br_port != NULL)
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
 {
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       prefetch(&dev->netdev_ops->ndo_start_xmit);
        if (likely(!skb->next)) {
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);
                                goto gso;
                }
 
-               return dev->hard_start_xmit(skb, dev);
+               return ops->ndo_start_xmit(skb, dev);
        }
 
 gso:
 
                skb->next = nskb->next;
                nskb->next = NULL;
-               rc = dev->hard_start_xmit(nskb, dev);
+               rc = ops->ndo_start_xmit(nskb, dev);
                if (unlikely(rc)) {
                        nskb->next = skb->next;
                        skb->next = nskb;
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
 {
+       const struct net_device_ops *ops = dev->netdev_ops;
        u16 queue_index = 0;
 
-       if (dev->select_queue)
-               queue_index = dev->select_queue(dev, skb);
+       if (ops->ndo_select_queue)
+               queue_index = ops->ndo_select_queue(dev, skb);
        else if (dev->real_num_tx_queues > 1)
                queue_index = simple_tx_hash(dev, skb);
 
 
                                      struct neigh_table *tbl)
 {
        struct neigh_parms *p, *ref;
-       struct net *net;
+       struct net *net = dev_net(dev);
+       const struct net_device_ops *ops = dev->netdev_ops;
 
-       net = dev_net(dev);
        ref = lookup_neigh_params(tbl, net, 0);
        if (!ref)
                return NULL;
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
 
-               if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
+               if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
                        kfree(p);
                        return NULL;
                }
 
 
        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
+               const struct net_device_ops *ops = dev->netdev_ops;
                struct netdev_queue *txq;
 
                if (!netif_device_present(dev) || !netif_running(dev)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq) ||
-                   dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+                   ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
                        local_irq_restore(flags);
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
+       const struct net_device_ops *ops = dev->netdev_ops;
        struct netpoll_info *npinfo = np->dev->npinfo;
 
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq))
-                                       status = dev->hard_start_xmit(skb, dev);
+                                       status = ops->ndo_start_xmit(skb, dev);
                                __netif_tx_unlock(txq);
 
                                if (status == NETDEV_TX_OK)
 
 
 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
-       struct net_device *odev = NULL;
+       struct net_device *odev = pkt_dev->odev;
+       int (*xmit)(struct sk_buff *, struct net_device *)
+               = odev->netdev_ops->ndo_start_xmit;
        struct netdev_queue *txq;
        __u64 idle_start = 0;
        u16 queue_map;
        int ret;
 
-       odev = pkt_dev->odev;
-
        if (pkt_dev->delay_us || pkt_dev->delay_ns) {
                u64 now;
 
 
                atomic_inc(&(pkt_dev->skb->users));
              retry_now:
-               ret = odev->hard_start_xmit(pkt_dev->skb, odev);
+               ret = (*xmit)(pkt_dev->skb, odev);
                if (likely(ret == NETDEV_TX_OK)) {
                        pkt_dev->last_ok = 1;
                        pkt_dev->sofar++;