2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/kernel.h>
35 #include <linux/ethtool.h>
36 #include <linux/netdevice.h>
/*
 * Helper macro: declares a static unsigned-int module parameter named X with
 * default def_val, registers it read-only in sysfs (mode 0444), and attaches
 * desc as its MODULE_PARM_DESC text.  Backslash continuations make the three
 * statements one macro body — do not insert lines between them.
 */
41 #define MLX4_EN_PARM_INT(X, def_val, desc) \
42 static unsigned int X = def_val;\
43 module_param(X , uint, 0444); \
44 MODULE_PARM_DESC(X, desc);
48 * Device scope module parameters
52 /* Use an XOR rather than Toeplitz hash function for RSS */
53 MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
55 /* RSS hash type mask - default to <saddr, daddr, sport, dport> */
56 MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
58 /* Number of LRO sessions per Rx ring (rounded up to a power of two) */
59 MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
60 "Number of LRO sessions per ring or disabled (0)");
62 /* Priority pausing */
63 MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
64 "Pause policy on TX: 0 never generate pause frames "
65 "1 generate pause frames according to RX buffer threshold");
66 MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
67 "Pause policy on RX: 0 ignore received pause frames "
68 "1 respect received pause frames");
69 MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
70 " Per priority bit mask");
71 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
72 " Per priority bit mask");
/*
 * NOTE(review): "Number or Rx rings" in the two descriptions below looks like
 * a typo for "Number of Rx rings".  The strings are user-visible modinfo
 * text, so they are left untouched here — fix in a code change.
 */
74 MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
75 MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
/* Ring sizes default to MLX4_EN_AUTO_CONF; resolved in mlx4_en_get_profile() */
77 MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
78 MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
79 MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
80 MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
/*
 * mlx4_en_get_profile() - translate the module parameters above into the
 * per-device profile (mdev->profile), clamping and normalizing values.
 *
 * NOTE(review): this extract is missing several original lines (opening and
 * closing braces, the declaration of the loop index 'i', the condition that
 * selects between the two tx_ring_num defaults, and the return statement).
 * Do not treat the visible statement sequence as complete control flow.
 */
83 int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
85 struct mlx4_en_profile *params = &mdev->profile;
88 params->rss_xor = (rss_xor != 0);
/*
 * NOTE(review): rss_mask is masked with 0x1f (5 bits) here while its
 * documented default above is 0xf (4 bits) — confirm the intended width.
 */
89 params->rss_mask = rss_mask & 0x1f;
/* Clamp the LRO session count to the hardware maximum. */
90 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
/* Ports are 1-based: prof[1] and prof[2] are used, prof[0] is not. */
91 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
92 params->prof[i].rx_pause = pprx;
93 params->prof[i].rx_ppp = pfcrx;
94 params->prof[i].tx_pause = pptx;
95 params->prof[i].tx_ppp = pfctx;
/*
 * Two alternative tx_ring_num defaults follow; the selecting condition
 * (presumably based on the per-priority flow-control masks) is among the
 * dropped lines — TODO confirm against the full source.
 */
98 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
99 params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
101 params->prof[1].tx_ring_num = 1;
102 params->prof[2].tx_ring_num = 1;
/* 0 (= #cores) passes through min_t unchanged; larger values are capped. */
104 params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
105 params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
/*
 * For each port and direction: resolve AUTO_CONF to the default size, then
 * enforce the minimum and round up to a power of two.
 */
107 if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
108 tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
109 params->prof[1].tx_ring_size =
110 (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
111 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
113 if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
114 tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
115 params->prof[2].tx_ring_size =
116 (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
117 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
119 if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
120 rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
121 params->prof[1].rx_ring_size =
122 (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
123 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
125 if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
126 rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
127 params->prof[2].rx_ring_size =
128 (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
129 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
/*
 * mlx4_en_update_lro_stats() - aggregate per-ring LRO counters into the
 * device-wide port_stats totals.  Zeroes the three totals, then sums the
 * aggregated/flushed/no_desc counters across all Rx rings.
 * Caller is expected to hold whatever lock protects port_stats (the caller
 * in this file takes priv->stats_lock); not verified here.
 * NOTE(review): braces and the loop-index declaration are among the lines
 * dropped from this extract.
 */
138 static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
142 priv->port_stats.lro_aggregated = 0;
143 priv->port_stats.lro_flushed = 0;
144 priv->port_stats.lro_no_desc = 0;
146 for (i = 0; i < priv->rx_ring_num; i++) {
147 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
148 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
149 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
/*
 * ethtool .get_drvinfo handler: fills in driver name/version, firmware
 * version (major.minor.subminor decoded from the 48-bit caps.fw_ver), and
 * the PCI bus id.
 * NOTE(review): sprintf() into the fixed-size drvinfo->driver field is
 * unbounded (board_id length unchecked), and strncpy() with exactly 32 does
 * not guarantee NUL termination if the source fills the buffer — consider
 * snprintf()/strlcpy() in a code change.  The storage-class line of this
 * definition is among the lines dropped from this extract.
 */
154 mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
156 struct mlx4_en_priv *priv = netdev_priv(dev);
157 struct mlx4_en_dev *mdev = priv->mdev;
159 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
160 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
/* fw_ver layout: bits 47:32 major, 31:16 minor, 15:0 subminor. */
161 sprintf(drvinfo->fw_version, "%d.%d.%d",
162 (u16) (mdev->dev->caps.fw_ver >> 32),
163 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
164 (u16) (mdev->dev->caps.fw_ver & 0xffff));
165 strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
/* No extra stats/regdump/eeprom exposed through drvinfo. */
166 drvinfo->n_stats = 0;
167 drvinfo->regdump_len = 0;
168 drvinfo->eedump_len = 0;
/* ethtool .get_tso handler: report 1 if NETIF_F_TSO is set, else 0. */
171 static u32 mlx4_en_get_tso(struct net_device *dev)
173 return (dev->features & NETIF_F_TSO) != 0;
/*
 * ethtool .set_tso handler: enable or disable NETIF_F_TSO | NETIF_F_TSO6
 * according to 'data', gated on device LSO support.
 * NOTE(review): the lines between the LSO_support check and the feature
 * updates (presumably an error return when unsupported, the if/else around
 * the two feature assignments keyed on 'data', and the final return) are
 * missing from this extract.
 */
176 static int mlx4_en_set_tso(struct net_device *dev, u32 data)
178 struct mlx4_en_priv *priv = netdev_priv(dev);
181 if (!priv->mdev->LSO_support)
183 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
185 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
/* ethtool .get_rx_csum handler: report the driver's rx_csum flag. */
189 static u32 mlx4_en_get_rx_csum(struct net_device *dev)
191 struct mlx4_en_priv *priv = netdev_priv(dev);
192 return priv->rx_csum;
/*
 * ethtool .set_rx_csum handler: store the new setting as a 0/1 flag.
 * The return statement is among the lines dropped from this extract.
 */
195 static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
197 struct mlx4_en_priv *priv = netdev_priv(dev);
198 priv->rx_csum = (data != 0);
/*
 * ethtool statistics names, returned by mlx4_en_get_strings().  Laid out in
 * three consecutive groups whose order and counts must match the copy loops
 * in mlx4_en_get_ethtool_stats(): NUM_MAIN_STATS netdev counters first, then
 * NUM_PORT_STATS port counters, then NUM_PKT_STATS per-priority counters.
 * The closing brace of the initializer is among the lines dropped from this
 * extract.
 */
202 static const char main_strings[][ETH_GSTRING_LEN] = {
/* main (struct net_device_stats-order) counters — 21 entries */
203 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
204 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
205 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
206 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
207 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
208 "tx_heartbeat_errors", "tx_window_errors",
210 /* port statistics */
211 "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
212 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
213 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
215 /* packet statistics */
216 "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
217 "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
218 "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
219 "tx_prio_6", "tx_prio_7",
/*
 * NUM_MAIN_STATS must equal the number of entries in the first group of
 * main_strings[] above (21 names are listed there).
 */
221 #define NUM_MAIN_STATS 21
222 #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
/* ethtool .get_msglevel handler: return the driver's message-enable mask. */
224 static u32 mlx4_en_get_msglevel(struct net_device *dev)
226 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
/* ethtool .set_msglevel handler: store the new message-enable mask. */
229 static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
231 ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
/*
 * ethtool .get_wol handler.  The entire body is among the lines dropped from
 * this extract — presumably it reports no Wake-on-LAN support (e.g. zeroes
 * *wol); TODO confirm against the full source.
 */
234 static void mlx4_en_get_wol(struct net_device *netdev,
235 struct ethtool_wolinfo *wol)
/*
 * ethtool .get_sset_count handler: for ETH_SS_STATS, the count is the fixed
 * NUM_ALL_STATS plus packets+bytes (2 values) per Tx and per Rx ring.  The
 * error return for other stringsets is among the lines dropped from this
 * extract.
 */
243 static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
245 struct mlx4_en_priv *priv = netdev_priv(dev);
247 if (sset != ETH_SS_STATS)
250 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
/*
 * ethtool .get_ethtool_stats handler: copy all counters into data[] in the
 * exact order promised by main_strings[] and mlx4_en_get_strings(): main
 * netdev stats, port stats, per-Tx-ring packets/bytes, per-Rx-ring
 * packets/bytes, then packet (per-priority) stats.  Holds stats_lock for a
 * consistent snapshot and refreshes the LRO totals first.
 * NOTE(review): indexing priv->stats / port_stats / pkstats as arrays of
 * unsigned long assumes those structs are laid out as consecutive unsigned
 * longs matching the string table — verify the struct definitions; on 32-bit
 * builds unsigned long is narrower than the u64 data[] slots.  The 'i' and
 * 'index' declarations and the loop closing braces are among the dropped
 * lines.
 */
253 static void mlx4_en_get_ethtool_stats(struct net_device *dev,
254 struct ethtool_stats *stats, uint64_t *data)
256 struct mlx4_en_priv *priv = netdev_priv(dev);
260 spin_lock_bh(&priv->stats_lock);
262 mlx4_en_update_lro_stats(priv);
264 for (i = 0; i < NUM_MAIN_STATS; i++)
265 data[index++] = ((unsigned long *) &priv->stats)[i];
266 for (i = 0; i < NUM_PORT_STATS; i++)
267 data[index++] = ((unsigned long *) &priv->port_stats)[i];
268 for (i = 0; i < priv->tx_ring_num; i++) {
269 data[index++] = priv->tx_ring[i].packets;
270 data[index++] = priv->tx_ring[i].bytes;
272 for (i = 0; i < priv->rx_ring_num; i++) {
273 data[index++] = priv->rx_ring[i].packets;
274 data[index++] = priv->rx_ring[i].bytes;
276 for (i = 0; i < NUM_PKT_STATS; i++)
277 data[index++] = ((unsigned long *) &priv->pkstats)[i];
278 spin_unlock_bh(&priv->stats_lock);
/*
 * ethtool .get_strings handler: emit the statistic names in the same order
 * mlx4_en_get_ethtool_stats() emits values — main stats, port stats,
 * per-Tx-ring names, per-Rx-ring names, then packet stats.  Each name
 * occupies one ETH_GSTRING_LEN slot in data.
 * NOTE(review): the early return for stringsets other than ETH_SS_STATS,
 * the 'i'/'index' declarations, the per-ring format-string arguments to the
 * sprintf() calls, and the loop closing braces are among the lines dropped
 * from this extract.
 */
282 static void mlx4_en_get_strings(struct net_device *dev,
283 uint32_t stringset, uint8_t *data)
285 struct mlx4_en_priv *priv = netdev_priv(dev);
289 if (stringset != ETH_SS_STATS)
292 /* Add main counters */
293 for (i = 0; i < NUM_MAIN_STATS; i++)
294 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
295 for (i = 0; i < NUM_PORT_STATS; i++)
296 strcpy(data + (index++) * ETH_GSTRING_LEN,
297 main_strings[i + NUM_MAIN_STATS]);
/* Two names (packets, bytes) per Tx ring, then per Rx ring. */
298 for (i = 0; i < priv->tx_ring_num; i++) {
299 sprintf(data + (index++) * ETH_GSTRING_LEN,
301 sprintf(data + (index++) * ETH_GSTRING_LEN,
304 for (i = 0; i < priv->rx_ring_num; i++) {
305 sprintf(data + (index++) * ETH_GSTRING_LEN,
307 sprintf(data + (index++) * ETH_GSTRING_LEN,
310 for (i = 0; i < NUM_PKT_STATS; i++)
311 strcpy(data + (index++) * ETH_GSTRING_LEN,
312 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
/*
 * ethtool .get_settings handler: the link is fixed 10G full-duplex with no
 * autonegotiation; speed/duplex are reported only when the carrier is up
 * (the else branch and return are among the lines dropped from this
 * extract).
 * NOTE(review): cmd->advertising is assigned the SUPPORTED_ constant rather
 * than the corresponding ADVERTISED_ one — the conventional macro family for
 * that field; confirm the values coincide in this kernel's ethtool.h.
 */
315 static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
317 cmd->autoneg = AUTONEG_DISABLE;
318 cmd->supported = SUPPORTED_10000baseT_Full;
319 cmd->advertising = SUPPORTED_10000baseT_Full;
320 if (netif_carrier_ok(dev)) {
321 cmd->speed = SPEED_10000;
322 cmd->duplex = DUPLEX_FULL;
/*
 * ethtool .set_settings handler: only the device's one fixed configuration
 * (no autoneg, 10G, full duplex) is accepted; anything else is rejected.
 * The error return for the rejected case and the final success return are
 * among the lines dropped from this extract.
 */
330 static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
332 if ((cmd->autoneg == AUTONEG_ENABLE) ||
333 (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
336 /* Nothing to change */
/*
 * ethtool .get_coalesce handler: report the current Rx interrupt-moderation
 * settings and the adaptive-coalescing parameters.  Tx moderation is not
 * configurable here and is reported as 0.  The return statement is among
 * the lines dropped from this extract.
 */
340 static int mlx4_en_get_coalesce(struct net_device *dev,
341 struct ethtool_coalesce *coal)
343 struct mlx4_en_priv *priv = netdev_priv(dev);
345 coal->tx_coalesce_usecs = 0;
346 coal->tx_max_coalesced_frames = 0;
347 coal->rx_coalesce_usecs = priv->rx_usecs;
348 coal->rx_max_coalesced_frames = priv->rx_frames;
/* Adaptive (rate-based) moderation parameters. */
350 coal->pkt_rate_low = priv->pkt_rate_low;
351 coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
352 coal->pkt_rate_high = priv->pkt_rate_high;
353 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
354 coal->rate_sample_interval = priv->sample_interval;
355 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
/*
 * ethtool .set_coalesce handler: store the requested Rx moderation values
 * (with sentinel handling — the comparison targets and '?' arms of the two
 * conditional expressions are among the lines dropped from this extract,
 * presumably mapping an AUTO value to MLX4_EN_RX_COAL_TARGET/TIME-based
 * defaults; TODO confirm), record the adaptive parameters, reset the
 * moderation state, and — unless adaptive moderation takes over — push the
 * new count/time to every Rx CQ.
 * NOTE(review): the 'i'/'err' declarations, the early-exit after the
 * adaptive_rx_coal check, the error handling after mlx4_en_set_cq_moder(),
 * and the closing braces/return are also missing here.
 */
359 static int mlx4_en_set_coalesce(struct net_device *dev,
360 struct ethtool_coalesce *coal)
362 struct mlx4_en_priv *priv = netdev_priv(dev);
365 priv->rx_frames = (coal->rx_max_coalesced_frames ==
367 MLX4_EN_RX_COAL_TARGET /
369 coal->rx_max_coalesced_frames;
370 priv->rx_usecs = (coal->rx_coalesce_usecs ==
372 MLX4_EN_RX_COAL_TIME :
373 coal->rx_coalesce_usecs;
375 /* Set adaptive coalescing params */
376 priv->pkt_rate_low = coal->pkt_rate_low;
377 priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
378 priv->pkt_rate_high = coal->pkt_rate_high;
379 priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
380 priv->sample_interval = coal->rate_sample_interval;
381 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
/* Force the auto-moderation state machine to re-evaluate. */
382 priv->last_moder_time = MLX4_EN_AUTO_CONF;
383 if (priv->adaptive_rx_coal)
386 for (i = 0; i < priv->rx_ring_num; i++) {
387 priv->rx_cq[i].moder_cnt = priv->rx_frames;
388 priv->rx_cq[i].moder_time = priv->rx_usecs;
389 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
/*
 * ethtool .set_pauseparam handler: record the requested tx/rx pause policy
 * in the port profile as 0/1 flags, then program the port via
 * mlx4_SET_PORT_general() (MTU accounts for the FCS).  Two of the call's
 * arguments (between tx_pause and rx_pause, and after rx_pause — presumably
 * the tx/rx per-priority flow-control masks), the error-check 'if' guarding
 * the mlx4_err() call, and the return are among the lines dropped from this
 * extract.
 * NOTE(review): the error string "Failed setting pause params to" ends in a
 * dangling "to" — likely a truncated message; left untouched here since it
 * is runtime output.
 */
396 static int mlx4_en_set_pauseparam(struct net_device *dev,
397 struct ethtool_pauseparam *pause)
399 struct mlx4_en_priv *priv = netdev_priv(dev);
400 struct mlx4_en_dev *mdev = priv->mdev;
403 priv->prof->tx_pause = pause->tx_pause != 0;
404 priv->prof->rx_pause = pause->rx_pause != 0;
405 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
406 priv->rx_skb_size + ETH_FCS_LEN,
407 priv->prof->tx_pause,
409 priv->prof->rx_pause,
412 mlx4_err(mdev, "Failed setting pause params to\n");
/* ethtool .get_pauseparam handler: report the profile's pause policy. */
417 static void mlx4_en_get_pauseparam(struct net_device *dev,
418 struct ethtool_pauseparam *pause)
420 struct mlx4_en_priv *priv = netdev_priv(dev);
422 pause->tx_pause = priv->prof->tx_pause;
423 pause->rx_pause = priv->prof->rx_pause;
/*
 * ethtool .get_ringparam handler: report the configured ring sizes from the
 * device profile and the maxima from device caps.
 * NOTE(review): the maxima come from caps.max_rq_sg/max_sq_sg, which by name
 * are scatter/gather entry limits rather than ring-size limits — confirm
 * this is the intended ceiling for ring sizing.
 */
426 static void mlx4_en_get_ringparam(struct net_device *dev,
427 struct ethtool_ringparam *param)
429 struct mlx4_en_priv *priv = netdev_priv(dev);
430 struct mlx4_en_dev *mdev = priv->mdev;
/* Zero first so the *_mini/_jumbo fields read as unsupported. */
432 memset(param, 0, sizeof(*param));
433 param->rx_max_pending = mdev->dev->caps.max_rq_sg;
434 param->tx_max_pending = mdev->dev->caps.max_sq_sg;
435 param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
436 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
439 const struct ethtool_ops mlx4_en_ethtool_ops = {
440 .get_drvinfo = mlx4_en_get_drvinfo,
441 .get_settings = mlx4_en_get_settings,
442 .set_settings = mlx4_en_set_settings,
444 .get_tso = mlx4_en_get_tso,
445 .set_tso = mlx4_en_set_tso,
447 .get_sg = ethtool_op_get_sg,
448 .set_sg = ethtool_op_set_sg,
449 .get_link = ethtool_op_get_link,
450 .get_rx_csum = mlx4_en_get_rx_csum,
451 .set_rx_csum = mlx4_en_set_rx_csum,
452 .get_tx_csum = ethtool_op_get_tx_csum,
453 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
454 .get_strings = mlx4_en_get_strings,
455 .get_sset_count = mlx4_en_get_sset_count,
456 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
457 .get_wol = mlx4_en_get_wol,
458 .get_msglevel = mlx4_en_get_msglevel,
459 .set_msglevel = mlx4_en_set_msglevel,
460 .get_coalesce = mlx4_en_get_coalesce,
461 .set_coalesce = mlx4_en_set_coalesce,
462 .get_pauseparam = mlx4_en_get_pauseparam,
463 .set_pauseparam = mlx4_en_set_pauseparam,
464 .get_ringparam = mlx4_en_get_ringparam,
465 .get_flags = ethtool_op_get_flags,
466 .set_flags = ethtool_op_set_flags,