Skip to content

Commit

Permalink
Merge branch 'net-mana-big-tcp'
Browse files Browse the repository at this point in the history
Shradha Gupta says:

====================
net: Enable Big TCP for MANA devices

Allow the max gso/gro aggregated pkt size to go up to GSO_MAX_SIZE for
MANA NIC. On Azure, this is not possible without allowing the same for
the netvsc NIC (as the NICs are bonded together).
Therefore, we use netif_set_tso_max_size() to set the max aggregated
pkt size to the VF's tso_max_size for netvsc too, when the data path is
switched over to the VF.

The first patch allows MANA to configure an aggregated pkt size of up to
GSO_MAX_SIZE.

The second patch enables the same on the netvsc NIC, if the data path
for the bonded NIC is switched to the VF

---
 Changes in v3
 * Add ipv6_hopopt_jumbo_remove() while sending Big TCP packets
---
  Changes in v2
  * Instead of using 'tcp segment' throughout the patch used the words
    'aggregated pkt size'
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
davem330 committed Feb 19, 2025
2 parents aefd232 + 6859209 commit 8e7d925
Show file tree
Hide file tree
Showing 4 changed files with 29 additions and 6 deletions.
5 changes: 5 additions & 0 deletions drivers/net/ethernet/microsoft/mana/mana_en.c
Original file line number Diff line number Diff line change
Expand Up @@ -256,6 +256,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;

if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
goto tx_drop_count;

txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
Expand Down Expand Up @@ -2873,6 +2876,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);

netif_set_tso_max_size(ndev, GSO_MAX_SIZE);

netif_carrier_off(ndev);

netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
Expand Down
2 changes: 2 additions & 0 deletions drivers/net/hyperv/hyperv_net.h
Original file line number Diff line number Diff line change
Expand Up @@ -1166,6 +1166,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;

u32 netvsc_gso_max_size;

atomic_t open_chn;
struct work_struct subchan_work;
wait_queue_head_t subchan_open;
Expand Down
15 changes: 15 additions & 0 deletions drivers/net/hyperv/netvsc_drv.c
Original file line number Diff line number Diff line change
Expand Up @@ -2461,6 +2461,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
} else {
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);

/* In Azure, when accelerated networking is enabled, other NICs
 * like MANA, MLX, are configured as a bonded NIC with
* Netvsc(failover) NIC. For bonded NICs, the min of the max
* pkt aggregate size of the members is propagated in the stack.
* In order to allow these NICs (MANA/MLX) to use up to
* GSO_MAX_SIZE gso packet size, we need to allow Netvsc NIC to
* also support this in the guest.
* This value is only increased for netvsc NIC when datapath is
* switched over to the VF
*/
if (vf_is_up)
netif_set_tso_max_size(ndev, vf_netdev->tso_max_size);
else
netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size);
}

return NOTIFY_OK;
Expand Down
13 changes: 7 additions & 6 deletions drivers/net/hyperv/rndis_filter.c
Original file line number Diff line number Diff line change
Expand Up @@ -1356,9 +1356,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;

nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;

/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
Expand Down Expand Up @@ -1390,8 +1391,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO;

if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
gso_max_size = hwcaps.lsov2.ip4_maxsz;
if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
}

if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
Expand All @@ -1411,8 +1412,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO6;

if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
gso_max_size = hwcaps.lsov2.ip6_maxsz;
if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
}

if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
Expand All @@ -1438,7 +1439,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;

netif_set_tso_max_size(net, gso_max_size);
netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);

ret = rndis_filter_set_offload_params(net, nvdev, &offloads);

Expand Down

0 comments on commit 8e7d925

Please sign in to comment.