52
52
#include "fs_core.h"
53
53
#include "en/port.h"
54
54
#include "en/tc_tun.h"
55
+ #include "lib/devcom.h"
55
56
56
57
struct mlx5_nic_flow_attr {
57
58
u32 action ;
74
75
MLX5E_TC_FLOW_HAIRPIN = BIT (MLX5E_TC_FLOW_BASE + 3 ),
75
76
MLX5E_TC_FLOW_HAIRPIN_RSS = BIT (MLX5E_TC_FLOW_BASE + 4 ),
76
77
MLX5E_TC_FLOW_SLOW = BIT (MLX5E_TC_FLOW_BASE + 5 ),
78
+ MLX5E_TC_FLOW_DUP = BIT (MLX5E_TC_FLOW_BASE + 6 ),
77
79
};
78
80
79
81
#define MLX5E_TC_MAX_SPLITS 1
@@ -111,8 +113,10 @@ struct mlx5e_tc_flow {
111
113
* destinations.
112
114
*/
113
115
struct encap_flow_item encaps [MLX5_MAX_FLOW_FWD_VPORTS ];
116
+ struct mlx5e_tc_flow * peer_flow ;
114
117
struct list_head mod_hdr ; /* flows sharing the same mod hdr ID */
115
118
struct list_head hairpin ; /* flows sharing the same hairpin */
119
+ struct list_head peer ; /* flows with peer flow */
116
120
union {
117
121
struct mlx5_esw_flow_attr esw_attr [0 ];
118
122
struct mlx5_nic_flow_attr nic_attr [0 ];
@@ -1249,13 +1253,48 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1249
1253
}
1250
1254
}
1251
1255
1256
+ static void __mlx5e_tc_del_fdb_peer_flow (struct mlx5e_tc_flow * flow )
1257
+ {
1258
+ struct mlx5_eswitch * esw = flow -> priv -> mdev -> priv .eswitch ;
1259
+
1260
+ if (!(flow -> flags & MLX5E_TC_FLOW_ESWITCH ) ||
1261
+ !(flow -> flags & MLX5E_TC_FLOW_DUP ))
1262
+ return ;
1263
+
1264
+ mutex_lock (& esw -> offloads .peer_mutex );
1265
+ list_del (& flow -> peer );
1266
+ mutex_unlock (& esw -> offloads .peer_mutex );
1267
+
1268
+ flow -> flags &= ~MLX5E_TC_FLOW_DUP ;
1269
+
1270
+ mlx5e_tc_del_fdb_flow (flow -> peer_flow -> priv , flow -> peer_flow );
1271
+ kvfree (flow -> peer_flow );
1272
+ flow -> peer_flow = NULL ;
1273
+ }
1274
+
1275
+ static void mlx5e_tc_del_fdb_peer_flow (struct mlx5e_tc_flow * flow )
1276
+ {
1277
+ struct mlx5_core_dev * dev = flow -> priv -> mdev ;
1278
+ struct mlx5_devcom * devcom = dev -> priv .devcom ;
1279
+ struct mlx5_eswitch * peer_esw ;
1280
+
1281
+ peer_esw = mlx5_devcom_get_peer_data (devcom , MLX5_DEVCOM_ESW_OFFLOADS );
1282
+ if (!peer_esw )
1283
+ return ;
1284
+
1285
+ __mlx5e_tc_del_fdb_peer_flow (flow );
1286
+ mlx5_devcom_release_peer_data (devcom , MLX5_DEVCOM_ESW_OFFLOADS );
1287
+ }
1288
+
1252
1289
static void mlx5e_tc_del_flow (struct mlx5e_priv * priv ,
1253
1290
struct mlx5e_tc_flow * flow )
1254
1291
{
1255
- if (flow -> flags & MLX5E_TC_FLOW_ESWITCH )
1292
+ if (flow -> flags & MLX5E_TC_FLOW_ESWITCH ) {
1293
+ mlx5e_tc_del_fdb_peer_flow (flow );
1256
1294
mlx5e_tc_del_fdb_flow (priv , flow );
1257
- else
1295
+ } else {
1258
1296
mlx5e_tc_del_nic_flow (priv , flow );
1297
+ }
1259
1298
}
1260
1299
1261
1300
@@ -2660,6 +2699,11 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
2660
2699
return & priv -> fs .tc .ht ;
2661
2700
}
2662
2701
2702
/* Decide whether @flow must also be offloaded on the peer eswitch.
 *
 * Infrastructure stub: duplication is never requested yet, so this
 * always returns false. NOTE(review): presumably later changes key
 * this off the flow's attributes (e.g. source/destination vport) —
 * callers already handle the true case via mlx5e_tc_add_fdb_peer_flow.
 */
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	return false;
}
2706
+
2663
2707
static int
2664
2708
mlx5e_alloc_flow (struct mlx5e_priv * priv , int attr_size ,
2665
2709
struct tc_cls_flower_offload * f , u16 flow_flags ,
@@ -2693,11 +2737,13 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
2693
2737
}
2694
2738
2695
2739
static int
2696
- mlx5e_add_fdb_flow (struct mlx5e_priv * priv ,
2697
- struct tc_cls_flower_offload * f ,
2698
- u16 flow_flags ,
2699
- struct net_device * filter_dev ,
2700
- struct mlx5e_tc_flow * * __flow )
2740
+ __mlx5e_add_fdb_flow (struct mlx5e_priv * priv ,
2741
+ struct tc_cls_flower_offload * f ,
2742
+ u16 flow_flags ,
2743
+ struct net_device * filter_dev ,
2744
+ struct mlx5_eswitch_rep * in_rep ,
2745
+ struct mlx5_core_dev * in_mdev ,
2746
+ struct mlx5e_tc_flow * * __flow )
2701
2747
{
2702
2748
struct netlink_ext_ack * extack = f -> common .extack ;
2703
2749
struct mlx5e_tc_flow_parse_attr * parse_attr ;
@@ -2723,6 +2769,8 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2723
2769
if (err )
2724
2770
goto err_free ;
2725
2771
2772
+ flow -> esw_attr -> in_rep = in_rep ;
2773
+ flow -> esw_attr -> in_mdev = in_mdev ;
2726
2774
err = mlx5e_tc_add_fdb_flow (priv , parse_attr , flow , extack );
2727
2775
if (err )
2728
2776
goto err_free ;
@@ -2738,6 +2786,87 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2738
2786
return err ;
2739
2787
}
2740
2788
2789
+ static int mlx5e_tc_add_fdb_peer_flow (struct tc_cls_flower_offload * f ,
2790
+ struct mlx5e_tc_flow * flow )
2791
+ {
2792
+ struct mlx5e_priv * priv = flow -> priv , * peer_priv ;
2793
+ struct mlx5_eswitch * esw = priv -> mdev -> priv .eswitch , * peer_esw ;
2794
+ struct mlx5_devcom * devcom = priv -> mdev -> priv .devcom ;
2795
+ struct mlx5e_tc_flow_parse_attr * parse_attr ;
2796
+ struct mlx5e_rep_priv * peer_urpriv ;
2797
+ struct mlx5e_tc_flow * peer_flow ;
2798
+ struct mlx5_core_dev * in_mdev ;
2799
+ int err = 0 ;
2800
+
2801
+ peer_esw = mlx5_devcom_get_peer_data (devcom , MLX5_DEVCOM_ESW_OFFLOADS );
2802
+ if (!peer_esw )
2803
+ return - ENODEV ;
2804
+
2805
+ peer_urpriv = mlx5_eswitch_get_uplink_priv (peer_esw , REP_ETH );
2806
+ peer_priv = netdev_priv (peer_urpriv -> netdev );
2807
+
2808
+ /* in_mdev is assigned of which the packet originated from.
2809
+ * So packets redirected to uplink use the same mdev of the
2810
+ * original flow and packets redirected from uplink use the
2811
+ * peer mdev.
2812
+ */
2813
+ if (flow -> esw_attr -> in_rep -> vport == FDB_UPLINK_VPORT )
2814
+ in_mdev = peer_priv -> mdev ;
2815
+ else
2816
+ in_mdev = priv -> mdev ;
2817
+
2818
+ parse_attr = flow -> esw_attr -> parse_attr ;
2819
+ err = __mlx5e_add_fdb_flow (peer_priv , f , flow -> flags ,
2820
+ parse_attr -> filter_dev ,
2821
+ flow -> esw_attr -> in_rep , in_mdev , & peer_flow );
2822
+ if (err )
2823
+ goto out ;
2824
+
2825
+ flow -> peer_flow = peer_flow ;
2826
+ flow -> flags |= MLX5E_TC_FLOW_DUP ;
2827
+ mutex_lock (& esw -> offloads .peer_mutex );
2828
+ list_add_tail (& flow -> peer , & esw -> offloads .peer_flows );
2829
+ mutex_unlock (& esw -> offloads .peer_mutex );
2830
+
2831
+ out :
2832
+ mlx5_devcom_release_peer_data (devcom , MLX5_DEVCOM_ESW_OFFLOADS );
2833
+ return err ;
2834
+ }
2835
+
2836
+ static int
2837
+ mlx5e_add_fdb_flow (struct mlx5e_priv * priv ,
2838
+ struct tc_cls_flower_offload * f ,
2839
+ u16 flow_flags ,
2840
+ struct net_device * filter_dev ,
2841
+ struct mlx5e_tc_flow * * __flow )
2842
+ {
2843
+ struct mlx5e_rep_priv * rpriv = priv -> ppriv ;
2844
+ struct mlx5_eswitch_rep * in_rep = rpriv -> rep ;
2845
+ struct mlx5_core_dev * in_mdev = priv -> mdev ;
2846
+ struct mlx5e_tc_flow * flow ;
2847
+ int err ;
2848
+
2849
+ err = __mlx5e_add_fdb_flow (priv , f , flow_flags , filter_dev , in_rep ,
2850
+ in_mdev , & flow );
2851
+ if (err )
2852
+ goto out ;
2853
+
2854
+ if (is_peer_flow_needed (flow )) {
2855
+ err = mlx5e_tc_add_fdb_peer_flow (f , flow );
2856
+ if (err ) {
2857
+ mlx5e_tc_del_fdb_flow (priv , flow );
2858
+ goto out ;
2859
+ }
2860
+ }
2861
+
2862
+ * __flow = flow ;
2863
+
2864
+ return 0 ;
2865
+
2866
+ out :
2867
+ return err ;
2868
+ }
2869
+
2741
2870
static int
2742
2871
mlx5e_add_nic_flow (struct mlx5e_priv * priv ,
2743
2872
struct tc_cls_flower_offload * f ,
@@ -2882,7 +3011,9 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
2882
3011
int mlx5e_stats_flower (struct net_device * dev , struct mlx5e_priv * priv ,
2883
3012
struct tc_cls_flower_offload * f , int flags )
2884
3013
{
3014
+ struct mlx5_devcom * devcom = priv -> mdev -> priv .devcom ;
2885
3015
struct rhashtable * tc_ht = get_tc_ht (priv );
3016
+ struct mlx5_eswitch * peer_esw ;
2886
3017
struct mlx5e_tc_flow * flow ;
2887
3018
struct mlx5_fc * counter ;
2888
3019
u64 bytes ;
@@ -2902,6 +3033,27 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
2902
3033
2903
3034
mlx5_fc_query_cached (counter , & bytes , & packets , & lastuse );
2904
3035
3036
+ peer_esw = mlx5_devcom_get_peer_data (devcom , MLX5_DEVCOM_ESW_OFFLOADS );
3037
+ if (!peer_esw )
3038
+ goto out ;
3039
+
3040
+ if ((flow -> flags & MLX5E_TC_FLOW_DUP ) &&
3041
+ (flow -> peer_flow -> flags & MLX5E_TC_FLOW_OFFLOADED )) {
3042
+ u64 bytes2 ;
3043
+ u64 packets2 ;
3044
+ u64 lastuse2 ;
3045
+
3046
+ counter = mlx5e_tc_get_counter (flow -> peer_flow );
3047
+ mlx5_fc_query_cached (counter , & bytes2 , & packets2 , & lastuse2 );
3048
+
3049
+ bytes += bytes2 ;
3050
+ packets += packets2 ;
3051
+ lastuse = max_t (u64 , lastuse , lastuse2 );
3052
+ }
3053
+
3054
+ mlx5_devcom_release_peer_data (devcom , MLX5_DEVCOM_ESW_OFFLOADS );
3055
+
3056
+ out :
2905
3057
tcf_exts_stats_update (f -> exts , bytes , packets , lastuse );
2906
3058
2907
3059
return 0 ;
@@ -3014,3 +3166,11 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
3014
3166
3015
3167
return atomic_read (& tc_ht -> nelems );
3016
3168
}
3169
+
3170
+ void mlx5e_tc_clean_fdb_peer_flows (struct mlx5_eswitch * esw )
3171
+ {
3172
+ struct mlx5e_tc_flow * flow , * tmp ;
3173
+
3174
+ list_for_each_entry_safe (flow , tmp , & esw -> offloads .peer_flows , peer )
3175
+ __mlx5e_tc_del_fdb_peer_flow (flow );
3176
+ }
0 commit comments