
Merge branch 'mlx-next'
Or Gerlitz says:

====================
Mellanox NIC drivers update, June 23 2015

This series has two fixes from Eran to his recent SRIOV counters work in
mlx4 and a few more updates from Saeed and Achiad to the mlx5 Ethernet
code. All fixes here relate to net-next code, so no need for -stable.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
davem330 committed Jun 24, 2015
2 parents 0a51f76 + 99611ba commit 12d4ae9
Showing 7 changed files with 93 additions and 78 deletions.
14 changes: 14 additions & 0 deletions drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -203,6 +203,20 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.tso_packets += ring->tso_packets;
priv->port_stats.xmit_more += ring->xmit_more;
}
if (mlx4_is_master(mdev->dev)) {
stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
&mlx4_en_stats->RTOT_prio_1,
NUM_PRIORITIES);
stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
&mlx4_en_stats->TTOT_prio_1,
NUM_PRIORITIES);
stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
&mlx4_en_stats->ROCT_prio_1,
NUM_PRIORITIES);
stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
&mlx4_en_stats->TOCT_prio_1,
NUM_PRIORITIES);
}

/* net device stats */
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
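Note on the hunk above: when running as the SRIOV master (PF), the driver now derives the netdev rx/tx packet and byte totals by summing the per-priority RTOT/TTOT/ROCT/TOCT hardware counters. The standalone userspace sketch below only illustrates how a summing helper like en_stats_adder() plausibly works; the mailbox layout, the sample values, and the assumption that the helper derives its stride from the distance between the first two counter addresses are mine, not taken verbatim from the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PRIORITIES 8 /* assumed priority count, for illustration only */

/* Sketch of a helper in the spirit of en_stats_adder(): sum 'num'
 * counters, stepping by the distance between the addresses of the
 * first and second instance. The in-kernel helper additionally
 * converts each counter from big endian (be64_to_cpu) before adding. */
static uint64_t stats_adder(const uint64_t *first, const uint64_t *second, int num)
{
        ptrdiff_t stride = second - first;
        uint64_t sum = 0;
        int i;

        for (i = 0; i < num; i++)
                sum += first[i * stride];

        return sum;
}

int main(void)
{
        /* Hypothetical mailbox layout: per-priority packet and octet
         * counters interleaved as RTOT_prio_0, ROCT_prio_0,
         * RTOT_prio_1, ROCT_prio_1, ... (not the real firmware layout,
         * just enough to show how the stride is derived). */
        uint64_t mbox[NUM_PRIORITIES * 2];
        int i;

        for (i = 0; i < NUM_PRIORITIES; i++) {
                mbox[2 * i]     = 100 + i;  /* packets on priority i */
                mbox[2 * i + 1] = 1000 + i; /* octets on priority i  */
        }

        /* Mirrors the calls added in the hunk above, e.g.
         * en_stats_adder(&stats->RTOT_prio_0, &stats->RTOT_prio_1, NUM_PRIORITIES). */
        printf("rx_packets = %llu\n",
               (unsigned long long)stats_adder(&mbox[0], &mbox[2], NUM_PRIORITIES));
        printf("rx_bytes   = %llu\n",
               (unsigned long long)stats_adder(&mbox[1], &mbox[3], NUM_PRIORITIES));
        return 0;
}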
5 changes: 2 additions & 3 deletions drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -79,8 +79,7 @@ struct mlx4_en_flow_stats_tx {

#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_RX + \
NUM_PF_STATS)
NUM_FLOW_PRIORITY_STATS_RX)

struct mlx4_en_stat_out_flow_control_mbox {
/* Total number of PAUSE frames received from the far-end port */
@@ -108,7 +107,7 @@ enum {
};

#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
NUM_FLOW_STATS + NUM_PERF_STATS)
NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)

#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
6 changes: 2 additions & 4 deletions drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -208,7 +208,6 @@ enum cq_flags {
struct mlx5e_cq {
/* data path - accessed per cqe */
struct mlx5_cqwq wq;
void *sqrq;
unsigned long flags;

/* data path - accessed per napi poll */
@@ -316,6 +315,7 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
unsigned long flags;
int tc_to_txq_map[MLX5E_MAX_NUM_TC];

/* control */
struct mlx5e_priv *priv;
@@ -379,10 +379,9 @@ struct mlx5e_flow_table {

struct mlx5e_priv {
/* priv data path fields - start */
int order_base_2_num_channels;
int queue_mapping_channel_mask;
int num_tc;
int default_vlan_prio;
struct mlx5e_sq **txq_to_sq_map;
/* priv data path fields - end */

unsigned long state;
@@ -460,7 +459,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
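Note on the en.h changes above: the per-channel tc_to_txq_map and the priv-level txq_to_sq_map replace the earlier order_base_2_num_channels / queue_mapping_channel_mask scheme, so the number of TX queues no longer has to be rounded up to a power of two. The mapping used throughout this series (see mlx5e_create_sq() and mlx5e_build_tc_to_txq_map() in en_main.c below) is txq = channel index + tc * number of channels. The standalone sketch that follows just prints that layout for a hypothetical 4-channel, 2-TC configuration; the numbers are made up.

#include <stdio.h>

/* Hypothetical configuration, illustration only. */
#define NUM_CHANNELS 4
#define NUM_TC       2

int main(void)
{
        int tc_to_txq_map[NUM_CHANNELS][NUM_TC];
        int ch, tc, txq;

        /* Same arithmetic as mlx5e_build_tc_to_txq_map(): all TC-0
         * queues come first (one per channel), then all TC-1 queues,
         * and so on. */
        for (ch = 0; ch < NUM_CHANNELS; ch++)
                for (tc = 0; tc < NUM_TC; tc++)
                        tc_to_txq_map[ch][tc] = ch + tc * NUM_CHANNELS;

        for (ch = 0; ch < NUM_CHANNELS; ch++)
                for (tc = 0; tc < NUM_TC; tc++)
                        printf("channel %d, tc %d -> txq %d\n",
                               ch, tc, tc_to_txq_map[ch][tc]);

        /* Going the other way, as a flat txq_to_sq_map-style table
         * would: recover the channel and TC from the txq index. */
        for (txq = 0; txq < NUM_CHANNELS * NUM_TC; txq++)
                printf("txq %d -> channel %d, tc %d\n",
                       txq, txq % NUM_CHANNELS, txq / NUM_CHANNELS);

        return 0;
}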
45 changes: 27 additions & 18 deletions drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -345,7 +345,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -496,6 +495,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,

void *sqc = param->sqc;
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int txq_ix;
int err;

err = mlx5_alloc_map_uar(mdev, &sq->uar);
@@ -515,14 +515,15 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;

sq->txq = netdev_get_tx_queue(priv->netdev,
c->ix + tc * priv->params.num_channels);
txq_ix = c->ix + tc * priv->params.num_channels;
sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->tc = tc;
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
priv->txq_to_sq_map[txq_ix] = sq;

return 0;

@@ -852,8 +853,6 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
priv->params.tx_cq_moderation_pkts);
if (err)
goto err_close_tx_cqs;

c->sq[tc].cq.sqrq = &c->sq[tc];
}

return 0;
@@ -902,6 +901,15 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
int num_channels)
{
int i;

for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
c->tc_to_txq_map[i] = c->ix + i * num_channels;
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
@@ -923,6 +931,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mkey_be = cpu_to_be32(priv->mr.key);
c->num_tc = priv->num_tc;

mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);

netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

err = mlx5e_open_tx_cqs(c, cparam);
@@ -934,7 +944,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
priv->params.rx_cq_moderation_pkts);
if (err)
goto err_close_tx_cqs;
c->rq.cq.sqrq = &c->rq;

napi_enable(&c->napi);

@@ -1050,14 +1059,18 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
struct mlx5e_channel_param cparam;
int err;
int err = -ENOMEM;
int i;
int j;

priv->channel = kcalloc(priv->params.num_channels,
sizeof(struct mlx5e_channel *), GFP_KERNEL);
if (!priv->channel)
return -ENOMEM;

priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);

if (!priv->channel || !priv->txq_to_sq_map)
goto err_free_txq_to_sq_map;

mlx5e_build_channel_param(priv, &cparam);
for (i = 0; i < priv->params.num_channels; i++) {
@@ -1078,6 +1091,8 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
for (i--; i >= 0; i--)
mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
kfree(priv->txq_to_sq_map);
kfree(priv->channel);

return err;
@@ -1090,6 +1105,7 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);

kfree(priv->txq_to_sq_map);
kfree(priv->channel);
}

@@ -1384,8 +1400,7 @@ int mlx5e_open_locked(struct net_device *netdev)
int num_txqs;
int err;

num_txqs = roundup_pow_of_two(priv->params.num_channels) *
priv->params.num_tc;
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

@@ -1693,9 +1708,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->mdev = mdev;
priv->netdev = netdev;
priv->params.num_channels = num_comp_vectors;
priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
priv->queue_mapping_channel_mask =
roundup_pow_of_two(num_comp_vectors) - 1;
priv->num_tc = priv->params.num_tc;
priv->default_vlan_prio = priv->params.default_vlan_prio;

@@ -1723,7 +1735,6 @@ static void mlx5e_build_netdev(struct net_device *netdev)

if (priv->num_tc > 1) {
mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc;
}

netdev->netdev_ops = &mlx5e_netdev_ops;
@@ -1793,9 +1804,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
if (mlx5e_check_required_hca_cap(mdev))
return NULL;

netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
ncv);
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
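Note on mlx5e_open_channels() above: it now allocates two arrays (the channel pointers and the new txq_to_sq_map), checks both with one test, and unwinds through a single error label, relying on kfree(NULL) being a no-op. Below is a minimal userspace sketch of that allocate-then-unwind pattern with calloc/free standing in for kcalloc/kfree; the open_one_channel() helper and all names are hypothetical, not the driver's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct channel { int ix; };

/* Hypothetical stand-in for mlx5e_open_channel(). */
static int open_one_channel(struct channel **chs, int i)
{
        chs[i] = calloc(1, sizeof(*chs[i]));
        if (!chs[i])
                return -ENOMEM;
        chs[i]->ix = i;
        return 0;
}

/* Allocate everything up front, unwind through one label, as in the
 * patched mlx5e_open_channels(): free(NULL) is a no-op, just like
 * kfree(NULL), so the shared error label is safe even when only the
 * second allocation failed. */
static int open_channels(struct channel ***chs_out, int ***txq_map_out,
                         int num_channels, int num_tc)
{
        struct channel **chs;
        int **txq_map;
        int err = -ENOMEM;
        int i;

        chs = calloc(num_channels, sizeof(*chs));
        txq_map = calloc((size_t)num_channels * num_tc, sizeof(*txq_map));
        if (!chs || !txq_map)
                goto err_free;

        for (i = 0; i < num_channels; i++) {
                err = open_one_channel(chs, i);
                if (err)
                        goto err_close_channels;
        }

        *chs_out = chs;
        *txq_map_out = txq_map;
        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                free(chs[i]);
err_free:
        free(txq_map);
        free(chs);
        return err;
}

int main(void)
{
        struct channel **chs;
        int **txq_map;
        int i;
        int err = open_channels(&chs, &txq_map, 4, 2);

        printf("open_channels() returned %d\n", err);
        if (!err) {
                for (i = 0; i < 4; i++)
                        free(chs[i]);
                free(chs);
                free(txq_map);
        }
        return err ? 1 : 0;
}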
5 changes: 4 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -191,7 +191,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,

bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = cq->sqrq;
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int i;

/* avoid accessing cq (dma coherent memory) if not needed */
@@ -209,10 +209,13 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (!cqe)
break;

mlx5_cqwq_pop(&cq->wq);

wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
skb = rq->skb[wqe_counter];
prefetch(skb->data);
rq->skb[wqe_counter] = NULL;

dma_unmap_single(rq->pdev,
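Note on the en_rx.c change above: with the cq->sqrq back-pointer removed, mlx5e_poll_rx_cq() recovers the owning RQ from the completion queue with container_of(), which works because struct mlx5e_rq embeds its struct mlx5e_cq by value. The standalone sketch below shows the idiom with toy structures and a simplified form of the kernel macro; the names are placeholders.

#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel's container_of(): given a pointer
 * to a member, step back to the structure that embeds it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for the driver structures: the completion queue is
 * embedded by value inside the receive queue, which is what makes the
 * container_of() conversion in mlx5e_poll_rx_cq() valid. */
struct cq {
        int cqn;
};

struct rq {
        int rqn;
        struct cq cq; /* embedded, not a pointer */
};

static void poll_cq(struct cq *cq)
{
        /* Same shape as the change in the diff:
         * struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); */
        struct rq *rq = container_of(cq, struct rq, cq);

        printf("cq %d belongs to rq %d\n", cq->cqn, rq->rqn);
}

int main(void)
{
        struct rq rq = { .rqn = 7, .cq = { .cqn = 42 } };

        poll_cq(&rq.cq);
        return 0;
}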
