* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
- * Alan Cox, <Alan.Cox@linux.org>
+ * Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall, <bj0rn@blox.se>
* Pekka Riikonen <priikone@poseidon.pspt.fi>
*
#include <linux/workqueue.h>
#include <net/net_namespace.h>
+#include <net/dsa.h>
struct vlan_group;
struct ethtool_ops;
#define NET_XMIT_DROP 1 /* skb dropped */
#define NET_XMIT_CN 2 /* congestion notification */
#define NET_XMIT_POLICED 3 /* skb is shot by police */
-#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue;
- (TC use only - dev_queue_xmit
- returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#endif /* __KERNEL__ */
-struct net_device_subqueue
-{
- /* Give a control state for each queue. This struct may contain
- * per-queue locks in the future.
- */
- unsigned long state;
-};
-
/*
* Network device statistics. Akin to the 2.0 ether stats but
* with byte counters.
{
__LINK_STATE_START,
__LINK_STATE_PRESENT,
- __LINK_STATE_SCHED,
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
enum netdev_queue_state_t
{
__QUEUE_STATE_XOFF,
- __QUEUE_STATE_QDISC_RUNNING,
+ __QUEUE_STATE_FROZEN,
};
struct netdev_queue {
- spinlock_t lock;
struct net_device *dev;
struct Qdisc *qdisc;
unsigned long state;
- struct sk_buff *gso_skb;
spinlock_t _xmit_lock;
int xmit_lock_owner;
struct Qdisc *qdisc_sleeping;
- struct list_head qdisc_list;
- struct netdev_queue *next_sched;
} ____cacheline_aligned_in_smp;
/*
char name[IFNAMSIZ];
/* device name hash chain */
struct hlist_node name_hlist;
+ /* snmp alias */
+ char *ifalias;
/*
* I/O specific fields
#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
-#define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */
#define NETIF_F_LRO 32768 /* large receive offload */
/* Segmentation offload features */
/* Protocol specific pointers */
+#ifdef CONFIG_NET_DSA
+ void *dsa_ptr; /* dsa specific data */
+#endif
void *atalk_ptr; /* AppleTalk link */
void *ip_ptr; /* IPv4 specific data */
void *dn_ptr; /* DECnet specific data */
struct netdev_queue rx_queue;
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
+
+ /* Number of TX queues allocated at alloc_netdev_mq() time */
unsigned int num_tx_queues;
- unsigned long tx_queue_len; /* Max frames per queue allowed */
+ /* Number of TX queues currently active in device */
+ unsigned int real_num_tx_queues;
+
+ unsigned long tx_queue_len; /* Max frames per queue allowed */
+ spinlock_t tx_global_lock;
/*
* One part is mostly used on xmit path (device)
*/
void (*poll_controller)(struct net_device *dev);
#endif
+ u16 (*select_queue)(struct net_device *dev,
+ struct sk_buff *skb);
+
#ifdef CONFIG_NET_NS
/* Network namespace this network device is inside */
struct net *nd_net;
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
-
- /* The TX queue control structures */
- unsigned int egress_subqueue_count;
- struct net_device_subqueue egress_subqueue[1];
};
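
With the per-device subqueue array gone, multiqueue state lives in the netdev_queue array: num_tx_queues records how many queues alloc_netdev_mq() allocated, real_num_tx_queues how many the driver actually enabled, and the new select_queue hook lets a driver steer each skb to a ring of its choosing. A minimal sketch of how a driver might wire this up; foo_select_queue/foo_setup and the priority-based mapping are illustrative only, not part of the patch.

/* Hypothetical driver hook: map socket priority onto the active rings. */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb->priority % dev->real_num_tx_queues;
}

static void foo_setup(struct net_device *dev)
{
	dev->select_queue = foo_select_queue;
	dev->real_num_tx_queues = 4;	/* only four of the allocated rings are live */
}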
#define to_net_dev(d) container_of(d, struct net_device, dev)
#endif
}
+static inline bool netdev_uses_dsa_tags(struct net_device *dev)
+{
+#ifdef CONFIG_NET_DSA_TAG_DSA
+ if (dev->dsa_ptr != NULL)
+ return dsa_uses_dsa_tags(dev->dsa_ptr);
+#endif
+
+ return 0;
+}
+
+static inline bool netdev_uses_trailer_tags(struct net_device *dev)
+{
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ if (dev->dsa_ptr != NULL)
+ return dsa_uses_trailer_tags(dev->dsa_ptr);
+#endif
+
+ return 0;
+}
+
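These helpers let the receive path ask whether the interface sits behind a DSA switch whose tagging format hides the ethertype. A rough sketch of a caller, roughly mirroring the check the DSA patches add to eth_type_trans(); ETH_P_DSA and ETH_P_TRAILER come from <linux/if_ether.h>, and foo_rx_protocol() is an illustrative name.

static __be16 foo_rx_protocol(struct sk_buff *skb, struct net_device *dev)
{
	/* DSA- and trailer-tagged frames carry no usable ethertype, so
	 * classify them from the receiving interface's switch setup.
	 */
	if (netdev_uses_dsa_tags(dev))
		return htons(ETH_P_DSA);
	if (netdev_uses_trailer_tags(dev))
		return htons(ETH_P_TRAILER);

	return eth_type_trans(skb, dev);	/* normal ethertype parsing */
}
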
/**
* netdev_priv - access network device private data
* @dev: network device
*/
static inline void *netdev_priv(const struct net_device *dev)
{
- return dev->priv;
+ return (char *)dev + ((sizeof(struct net_device)
+ + NETDEV_ALIGN_CONST)
+ & ~NETDEV_ALIGN_CONST);
}
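
Since the private area is now carved out of the same allocation as struct net_device (rounded up to NETDEV_ALIGN), drivers no longer store or follow a dev->priv pointer. A minimal sketch, assuming an illustrative foo_priv layout:

struct foo_priv {
	unsigned long rx_dropped;	/* made-up driver state */
};

static struct net_device *foo_create(void)
{
	/* alloc_etherdev() reserves sizeof(struct foo_priv) bytes right
	 * behind the aligned struct net_device; netdev_priv() recomputes
	 * that address instead of dereferencing a pointer.
	 */
	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
	struct foo_priv *priv;

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_dropped = 0;
	return dev;
}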
/* Set the sysfs physical device reference for the network logical device
*/
struct softnet_data
{
- struct netdev_queue *output_queue;
+ struct Qdisc *output_queue;
struct sk_buff_head input_pkt_queue;
struct list_head poll_list;
struct sk_buff *completion_queue;
#define HAVE_NETIF_QUEUE
-extern void __netif_schedule(struct netdev_queue *txq);
+extern void __netif_schedule(struct Qdisc *q);
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
- __netif_schedule(txq);
+ __netif_schedule(txq->qdisc);
}
-static inline void netif_schedule(struct net_device *dev)
+static inline void netif_tx_schedule_all(struct net_device *dev)
{
- netif_schedule_queue(netdev_get_tx_queue(dev, 0));
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ netif_schedule_queue(netdev_get_tx_queue(dev, i));
+}
+
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+ clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
/**
*
* Allow upper layers to call the device hard_start_xmit routine.
*/
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+static inline void netif_start_queue(struct net_device *dev)
{
- clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
-static inline void netif_start_queue(struct net_device *dev)
+static inline void netif_tx_start_all_queues(struct net_device *dev)
{
- netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_start_queue(txq);
+ }
}
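
netif_tx_start_all_queues() is the multiqueue counterpart a driver would typically call from its ->open() handler once the hardware is ready; the fragment below is illustrative only.

static int foo_open(struct net_device *dev)
{
	/* ... bring up the hardware and program its TX/RX rings ... */

	netif_tx_start_all_queues(dev);	/* allow hard_start_xmit on every ring */
	return 0;
}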
-/**
- * netif_wake_queue - restart transmit
- * @dev: network device
- *
- * Allow upper layers to call the device hard_start_xmit routine.
- * Used for flow control when transmit resources are available.
- */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
}
#endif
if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
- __netif_schedule(dev_queue);
+ __netif_schedule(dev_queue->qdisc);
}
+/**
+ * netif_wake_queue - restart transmit
+ * @dev: network device
+ *
+ * Allow upper layers to call the device hard_start_xmit routine.
+ * Used for flow control when transmit resources are available.
+ */
static inline void netif_wake_queue(struct net_device *dev)
{
netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_wake_queue(txq);
+ }
+}
+
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+ set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
/**
 *	netif_stop_queue - stop transmitting packets
* @dev: network device
* Stop upper layers calling the device hard_start_xmit routine.
* Used for flow control when transmit resources are unavailable.
*/
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+static inline void netif_stop_queue(struct net_device *dev)
{
- set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
-static inline void netif_stop_queue(struct net_device *dev)
+static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
- netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_stop_queue(txq);
+ }
+}
+
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+ return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
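
Because flow-control state is now tracked per netdev_queue, a driver can stop just the ring an skb was mapped to without blocking the device's other queues. A hedged hard_start_xmit sketch; foo_start_xmit and the ring_full flag are placeholders.

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	bool ring_full = false;		/* would be derived from ring occupancy */

	/* ... post the skb to this queue's hardware ring ... */
	dev_kfree_skb(skb);		/* stand-in for the real DMA submission */

	if (ring_full)
		netif_tx_stop_queue(txq);	/* the other queues keep flowing */

	return NETDEV_TX_OK;
}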
/**
*
* Test if transmit queue on device is currently unable to send.
*/
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_queue_stopped(const struct net_device *dev)
{
- return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
-static inline int netif_queue_stopped(const struct net_device *dev)
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
- return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
+ return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}
/**
*/
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
- clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+ clear_bit(__QUEUE_STATE_XOFF, &txq->state);
}
/**
*/
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
if (netpoll_trap())
return;
#endif
- set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+ set_bit(__QUEUE_STATE_XOFF, &txq->state);
}
/**
static inline int __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index)
{
- return test_bit(__QUEUE_STATE_XOFF,
- &dev->egress_subqueue[queue_index].state);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+ return test_bit(__QUEUE_STATE_XOFF, &txq->state);
}
static inline int netif_subqueue_stopped(const struct net_device *dev,
*/
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
if (netpoll_trap())
return;
#endif
- if (test_and_clear_bit(__QUEUE_STATE_XOFF,
- &dev->egress_subqueue[queue_index].state))
- __netif_schedule(netdev_get_tx_queue(dev, 0));
+ if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+ __netif_schedule(txq->qdisc);
}
/**
* @dev: network device
*
* Check if device has multiple transmit queues
- * Always falls if NETDEVICE_MULTIQUEUE is not configured
*/
static inline int netif_is_multiqueue(const struct net_device *dev)
{
- return (!!(NETIF_F_MULTI_QUEUE & dev->features));
+ return (dev->num_tx_queues > 1);
}
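
netif_is_multiqueue() now simply reports whether more than one TX queue was allocated, so no feature flag is needed. A hypothetical TX-completion fragment using it together with the per-subqueue wake helper:

static void foo_tx_done(struct net_device *dev, u16 ring)
{
	/* Wake only the ring that was drained; single-queue devices fall
	 * back to the classic netif_wake_queue() path.
	 */
	if (netif_is_multiqueue(dev))
		netif_wake_subqueue(dev, ring);
	else
		netif_wake_queue(dev);
}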
/* Use this variant when it is known for sure that it
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int dev_change_flags(struct net_device *, unsigned);
-extern int dev_change_name(struct net_device *, char *);
+extern int dev_change_name(struct net_device *, const char *);
+extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
+ struct net_device *dev,
+ struct netdev_queue *txq);
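
dev_hard_start_xmit() now takes the TX queue explicitly, so the caller resolves it once and can consult that queue's state directly. A simplified sketch of such a caller; per-queue _xmit_lock handling and requeueing are omitted, and foo_xmit_one is an illustrative name.

static int foo_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	int ret = NETDEV_TX_BUSY;

	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	return ret;
}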
extern int netdev_budget;
local_irq_restore(flags);
}
-/**
- * netif_tx_lock - grab network device transmit lock
- * @dev: network device
- * @cpu: cpu number of lock owner
- *
- * Get network device transmit lock
- */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
txq->xmit_lock_owner = cpu;
}
-static inline void netif_tx_lock(struct net_device *dev)
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
- int cpu = smp_processor_id();
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- __netif_tx_lock(txq, cpu);
- }
-}
-
-static inline void netif_tx_lock_bh(struct net_device *dev)
-{
- local_bh_disable();
- netif_tx_lock(dev);
+ spin_lock_bh(&txq->_xmit_lock);
+ txq->xmit_lock_owner = smp_processor_id();
}
static inline int __netif_tx_trylock(struct netdev_queue *txq)
return ok;
}
-static inline int netif_tx_trylock(struct net_device *dev)
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
+ txq->xmit_lock_owner = -1;
+ spin_unlock(&txq->_xmit_lock);
}
-static inline void __netif_tx_unlock(struct netdev_queue *txq)
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
txq->xmit_lock_owner = -1;
- spin_unlock(&txq->_xmit_lock);
+ spin_unlock_bh(&txq->_xmit_lock);
}
-static inline void netif_tx_unlock(struct net_device *dev)
+/**
+ * netif_tx_lock - grab network device transmit lock
+ * @dev: network device
+ * @cpu: cpu number of lock owner
+ *
+ * Get network device transmit lock
+ */
+static inline void netif_tx_lock(struct net_device *dev)
{
unsigned int i;
+ int cpu;
+ spin_lock(&dev->tx_global_lock);
+ cpu = smp_processor_id();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* We are the only thread of execution doing a
+ * freeze, but we have to grab the _xmit_lock in
+ * order to synchronize with threads which are in
+ * the ->hard_start_xmit() handler and already
+ * checked the frozen bit.
+ */
+ __netif_tx_lock(txq, cpu);
+ set_bit(__QUEUE_STATE_FROZEN, &txq->state);
__netif_tx_unlock(txq);
}
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+ local_bh_disable();
+ netif_tx_lock(dev);
+}
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* No need to grab the _xmit_lock here. If the
+ * queue is not stopped for another reason, we
+ * force a schedule.
+ */
+ clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+ __netif_schedule(txq->qdisc);
+ }
+ spin_unlock(&dev->tx_global_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
static inline void netif_tx_disable(struct net_device *dev)
{
- netif_tx_lock_bh(dev);
- netif_stop_queue(dev);
- netif_tx_unlock_bh(dev);
+ unsigned int i;
+ int cpu;
+
+ local_bh_disable();
+ cpu = smp_processor_id();
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ __netif_tx_lock(txq, cpu);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+ local_bh_enable();
}
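
netif_tx_lock() no longer holds every per-queue _xmit_lock for its whole critical section; it takes tx_global_lock, briefly grabs each _xmit_lock to synchronize with in-flight hard_start_xmit calls, and marks each queue __QUEUE_STATE_FROZEN, which transmit paths check before touching the device. netif_tx_unlock() thaws the queues and reschedules any that are not otherwise stopped, and netif_tx_disable() stops each queue under its own lock. A hedged sketch of a driver using the freeze around a reconfiguration (foo_reconfigure is illustrative):

static void foo_reconfigure(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* freeze every TX queue */

	/* ... resize or swap the hardware TX rings here; transmit paths
	 * that honour __QUEUE_STATE_FROZEN stay off the device ...
	 */

	netif_tx_unlock_bh(dev);	/* thaw and reschedule live queues */
}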
static inline void netif_addr_lock(struct net_device *dev)
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);
+extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
+
extern void linkwatch_run_queue(void);
extern int netdev_compute_features(unsigned long all, unsigned long one);