X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fnetdevice.h;h=e432b743dda24e8a0cacc263f4790d2fdb73cc6e;hb=8648b3053bff39a7ee4c711d74268079c928a657;hp=7fda03d338d1c40a76e2acd6d7a4c80d4910b198;hpb=fed8bf19ec20efc0641740b2d10aa589dbd6d1ab;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7fda03d338d..e432b743dda 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include <linux/dmaengine.h>
 
 struct divert_blk;
 struct vlan_group;
@@ -230,7 +231,8 @@ enum netdev_state_t
 	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_RX_SCHED,
-	__LINK_STATE_LINKWATCH_PENDING
+	__LINK_STATE_LINKWATCH_PENDING,
+	__LINK_STATE_DORMANT,
 };
 
 
@@ -310,6 +312,9 @@ struct net_device
 #define NETIF_F_LLTX		4096	/* LockLess TX */
 #define NETIF_F_UFO		8192	/* Can offload UDP Large Send*/
 
+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
+
 	struct net_device	*next_sched;
 
 	/* Interface index. Unique device identifier	*/
@@ -335,11 +340,14 @@ struct net_device
 	 */
 
-	unsigned short		flags;	/* interface flags (a la BSD)	*/
+	unsigned int		flags;	/* interface flags (a la BSD)	*/
 	unsigned short		gflags;
 	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
 	unsigned short		padded;	/* How much padding added by alloc_netdev() */
 
+	unsigned char		operstate; /* RFC2863 operstate */
+	unsigned char		link_mode; /* mapping policy to operstate */
+
 	unsigned		mtu;	/* interface MTU value		*/
 	unsigned short		type;	/* interface hardware type	*/
 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 
@@ -402,7 +410,7 @@ struct net_device
 	 * One part is mostly used on xmit path (device)
 	 */
 	/* hard_start_xmit synchronizer */
-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
 	/* cpu id of processor entered to hard_start_xmit or -1,
 	   if nobody entered there.
 	 */
@@ -429,8 +437,7 @@ struct net_device
 
 	/* register/unregister state machine */
 	enum { NETREG_UNINITIALIZED=0,
-	       NETREG_REGISTERING,	/* called register_netdevice */
-	       NETREG_REGISTERED,	/* completed register todo */
+	       NETREG_REGISTERED,	/* completed register_netdevice */
 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 	       NETREG_RELEASED,		/* called free_netdev */
@@ -502,6 +509,8 @@ struct net_device
 
 	/* class/net/name entry */
 	struct class_device	class_dev;
+	/* space for optional statistics and wireless sysfs groups */
+	struct attribute_group	*sysfs_groups[3];
 };
 
 #define NETDEV_ALIGN		32
@@ -588,26 +597,16 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct net_device	backlog_dev;	/* Sorry. 8) */
+#ifdef CONFIG_NET_DMA
+	struct dma_chan		*net_dma;
+#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-static inline void __netif_schedule(struct net_device *dev)
-{
-	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-		unsigned long flags;
-		struct softnet_data *sd;
-
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		dev->next_sched = sd->output_queue;
-		sd->output_queue = dev;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
-}
+extern void __netif_schedule(struct net_device *dev);
 
 static inline void netif_schedule(struct net_device *dev)
 {
@@ -671,13 +670,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 /* Use this variant in places where it could be invoked
  * either from interrupt or non-interrupt context.
  */
-static inline void dev_kfree_skb_any(struct sk_buff *skb)
-{
-	if (in_irq() || irqs_disabled())
-		dev_kfree_skb_irq(skb);
-	else
-		dev_kfree_skb(skb);
-}
+extern void dev_kfree_skb_any(struct sk_buff *skb);
 
 #define HAVE_NETIF_RX 1
 extern int netif_rx(struct sk_buff *skb);
@@ -708,12 +701,18 @@ static inline void dev_put(struct net_device *dev)
 	atomic_dec(&dev->refcnt);
 }
 
-#define __dev_put(dev) atomic_dec(&(dev)->refcnt)
-#define dev_hold(dev) atomic_inc(&(dev)->refcnt)
+static inline void dev_hold(struct net_device *dev)
+{
+	atomic_inc(&dev->refcnt);
+}
 
 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
  * and _off may be called from IRQ context, but it is caller
  * who is responsible for serialization of these calls.
+ *
+ * The name carrier is inappropriate, these functions should really be
+ * called netif_lowerlayer_*() because they represent the state of any
+ * kind of lower layer not just hardware media.
  */
 
 extern void linkwatch_fire_event(struct net_device *dev);
@@ -729,29 +728,39 @@
 extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
-/* Hot-plugging. */
-static inline int netif_device_present(struct net_device *dev)
+static inline void netif_dormant_on(struct net_device *dev)
 {
-	return test_bit(__LINK_STATE_PRESENT, &dev->state);
+	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
+		linkwatch_fire_event(dev);
 }
 
-static inline void netif_device_detach(struct net_device *dev)
+static inline void netif_dormant_off(struct net_device *dev)
 {
-	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_stop_queue(dev);
-	}
+	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
+		linkwatch_fire_event(dev);
 }
 
-static inline void netif_device_attach(struct net_device *dev)
+static inline int netif_dormant(const struct net_device *dev)
 {
-	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_wake_queue(dev);
-		__netdev_watchdog_up(dev);
-	}
+	return test_bit(__LINK_STATE_DORMANT, &dev->state);
 }
+
+static inline int netif_oper_up(const struct net_device *dev) {
+	return (dev->operstate == IF_OPER_UP ||
+		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+}
+
+/* Hot-plugging. */
+static inline int netif_device_present(struct net_device *dev)
+{
+	return test_bit(__LINK_STATE_PRESENT, &dev->state);
+}
+
+extern void netif_device_detach(struct net_device *dev);
+
+extern void netif_device_attach(struct net_device *dev);
+
 /*
  * Network interface message level settings
  */
@@ -818,20 +827,7 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
  * already been called and returned 1.
  */
 
-static inline void __netif_rx_schedule(struct net_device *dev)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	dev_hold(dev);
-	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-	if (dev->quota < 0)
-		dev->quota += dev->weight;
-	else
-		dev->quota = dev->weight;
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-	local_irq_restore(flags);
-}
+extern void __netif_rx_schedule(struct net_device *dev);
 
 /* Try to reschedule poll. Called by irq handler. */
 
@@ -900,11 +896,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	int err = spin_trylock(&dev->_xmit_lock);
+	if (!err)
+		dev->xmit_lock_owner = smp_processor_id();
+	return err;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	netif_stop_queue(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
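Usage note (not part of the patch): with this change dev->xmit_lock is renamed to _xmit_lock, so drivers should stop taking the spinlock directly and use the netif_tx_lock*() helpers added above, and the RFC2863 dormant helpers give a way to report "link present but not yet usable". The sketch below is a hypothetical driver fragment written against this header to illustrate both APIs; example_restart() and example_link_change() are invented names, not functions from the patch.

#include <linux/netdevice.h>
#include <linux/if.h>			/* IF_OPER_* values reported via operstate */

/* Hypothetical reset path: serialize against hard_start_xmit with the
 * new helpers instead of the old spin_lock_bh(&dev->xmit_lock).
 */
static void example_restart(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* also records xmit_lock_owner */
	netif_stop_queue(dev);
	/* ... re-initialize the hardware TX ring here ... */
	netif_wake_queue(dev);
	netif_tx_unlock_bh(dev);
}

/* Hypothetical link handler: keep carrier state and dormant state separate,
 * e.g. a device that has link but is still authenticating (802.1X, PPPoE).
 */
static void example_link_change(struct net_device *dev, int have_link, int authenticated)
{
	if (have_link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	if (have_link && !authenticated)
		netif_dormant_on(dev);	/* operstate becomes IF_OPER_DORMANT */
	else
		netif_dormant_off(dev);	/* IF_OPER_UP once fully usable */
}

Like the old open-coded locking, the helpers record xmit_lock_owner, which dev_queue_xmit uses to detect a CPU recursing into its own transmit path.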