/* Number of pages backing one multi-packet WQE (MPWQE). */
#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
/* Packet strides carved out of each page of an MPWQE buffer. */
#define MLX5_MPWRQ_STRIDES_PER_PAGE		(MLX5_MPWRQ_NUM_STRIDES >> \
						 MLX5_MPWRQ_WQE_PAGE_ORDER)
/* Upper bound on MTT entries a channel may need: pages per WQE,
 * rounded up to an 8-entry MTT group (matching mlx5e_get_mtt_octw()),
 * times the maximum number of MPW RQ entries.
 */
#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
				   BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
/* Byte alignment for the UMR MTT array (see mlx5e_umr_dma_info::mtt
 * vs. ::mtt_no_align).
 */
#define MLX5_UMR_ALIGN				(2048)
/* Threshold (bytes) below which an MPWQE packet is treated as "small"
 * on the RX path -- NOTE(review): exact use is in the .c file; confirm.
 */
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)

/* Default LRO (large receive offload) WQE buffer size. */
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ			(64 * 1024)
@@ -134,6 +137,13 @@ struct mlx5e_rx_wqe {
134
137
struct mlx5_wqe_data_seg data ;
135
138
};
136
139
140
+ struct mlx5e_umr_wqe {
141
+ struct mlx5_wqe_ctrl_seg ctrl ;
142
+ struct mlx5_wqe_umr_ctrl_seg uctrl ;
143
+ struct mlx5_mkey_seg mkc ;
144
+ struct mlx5_wqe_data_seg data ;
145
+ };
146
+
137
147
#ifdef CONFIG_MLX5_CORE_EN_DCB
138
148
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
139
149
#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
@@ -179,6 +189,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
179
189
"tx_queue_dropped" ,
180
190
"rx_wqe_err" ,
181
191
"rx_mpwqe_filler" ,
192
+ "rx_mpwqe_frag" ,
182
193
};
183
194
184
195
struct mlx5e_vport_stats {
@@ -221,8 +232,9 @@ struct mlx5e_vport_stats {
221
232
u64 tx_queue_dropped ;
222
233
u64 rx_wqe_err ;
223
234
u64 rx_mpwqe_filler ;
235
+ u64 rx_mpwqe_frag ;
224
236
225
- #define NUM_VPORT_COUNTERS 36
237
+ #define NUM_VPORT_COUNTERS 37
226
238
};
227
239
228
240
static const char pport_strings [][ETH_GSTRING_LEN ] = {
@@ -317,6 +329,7 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
317
329
"lro_bytes" ,
318
330
"wqe_err" ,
319
331
"mpwqe_filler" ,
332
+ "mpwqe_frag" ,
320
333
};
321
334
322
335
struct mlx5e_rq_stats {
@@ -328,7 +341,8 @@ struct mlx5e_rq_stats {
328
341
u64 lro_bytes ;
329
342
u64 wqe_err ;
330
343
u64 mpwqe_filler ;
331
- #define NUM_RQ_STATS 8
344
+ u64 mpwqe_frag ;
345
+ #define NUM_RQ_STATS 9
332
346
};
333
347
334
348
static const char sq_stats_strings [][ETH_GSTRING_LEN ] = {
@@ -407,6 +421,7 @@ struct mlx5e_tstamp {
407
421
408
422
/* Bit numbers for the RQ state flags. */
enum {
	MLX5E_RQ_STATE_POST_WQES_ENABLE,
	/* Presumably set while a posted UMR WQE for this RQ has not yet
	 * completed -- confirm against the RX completion path.
	 */
	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
};
411
426
412
427
struct mlx5e_cq {
@@ -434,18 +449,14 @@ struct mlx5e_dma_info {
434
449
dma_addr_t addr ;
435
450
};
436
451
437
- struct mlx5e_mpw_info {
438
- struct mlx5e_dma_info dma_info ;
439
- u16 consumed_strides ;
440
- u16 skbs_frags [MLX5_MPWRQ_PAGES_PER_WQE ];
441
- };
442
-
443
452
struct mlx5e_rq {
444
453
/* data path */
445
454
struct mlx5_wq_ll wq ;
446
455
u32 wqe_sz ;
447
456
struct sk_buff * * skb ;
448
457
struct mlx5e_mpw_info * wqe_info ;
458
+ __be32 mkey_be ;
459
+ __be32 umr_mkey_be ;
449
460
450
461
struct device * pdev ;
451
462
struct net_device * netdev ;
@@ -466,6 +477,36 @@ struct mlx5e_rq {
466
477
struct mlx5e_priv * priv ;
467
478
} ____cacheline_aligned_in_smp ;
468
479
480
+ struct mlx5e_umr_dma_info {
481
+ __be64 * mtt ;
482
+ __be64 * mtt_no_align ;
483
+ dma_addr_t mtt_addr ;
484
+ struct mlx5e_dma_info * dma_info ;
485
+ };
486
+
487
+ struct mlx5e_mpw_info {
488
+ union {
489
+ struct mlx5e_dma_info dma_info ;
490
+ struct mlx5e_umr_dma_info umr ;
491
+ };
492
+ u16 consumed_strides ;
493
+ u16 skbs_frags [MLX5_MPWRQ_PAGES_PER_WQE ];
494
+
495
+ void (* dma_pre_sync )(struct device * pdev ,
496
+ struct mlx5e_mpw_info * wi ,
497
+ u32 wqe_offset , u32 len );
498
+ void (* add_skb_frag )(struct device * pdev ,
499
+ struct sk_buff * skb ,
500
+ struct mlx5e_mpw_info * wi ,
501
+ u32 page_idx , u32 frag_offset , u32 len );
502
+ void (* copy_skb_header )(struct device * pdev ,
503
+ struct sk_buff * skb ,
504
+ struct mlx5e_mpw_info * wi ,
505
+ u32 page_idx , u32 offset ,
506
+ u32 headlen );
507
+ void (* free_wqe )(struct mlx5e_rq * rq , struct mlx5e_mpw_info * wi );
508
+ };
509
+
469
510
struct mlx5e_tx_wqe_info {
470
511
u32 num_bytes ;
471
512
u8 num_wqebbs ;
@@ -658,6 +699,7 @@ struct mlx5e_priv {
658
699
u32 pdn ;
659
700
u32 tdn ;
660
701
struct mlx5_core_mkey mkey ;
702
+ struct mlx5_core_mkey umr_mkey ;
661
703
struct mlx5e_rq drop_rq ;
662
704
663
705
struct mlx5e_channel * * channel ;
@@ -730,6 +772,21 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
730
772
bool mlx5e_post_rx_wqes (struct mlx5e_rq * rq );
731
773
int mlx5e_alloc_rx_wqe (struct mlx5e_rq * rq , struct mlx5e_rx_wqe * wqe , u16 ix );
732
774
int mlx5e_alloc_rx_mpwqe (struct mlx5e_rq * rq , struct mlx5e_rx_wqe * wqe , u16 ix );
775
+ void mlx5e_post_rx_fragmented_mpwqe (struct mlx5e_rq * rq );
776
+ void mlx5e_complete_rx_linear_mpwqe (struct mlx5e_rq * rq ,
777
+ struct mlx5_cqe64 * cqe ,
778
+ u16 byte_cnt ,
779
+ struct mlx5e_mpw_info * wi ,
780
+ struct sk_buff * skb );
781
+ void mlx5e_complete_rx_fragmented_mpwqe (struct mlx5e_rq * rq ,
782
+ struct mlx5_cqe64 * cqe ,
783
+ u16 byte_cnt ,
784
+ struct mlx5e_mpw_info * wi ,
785
+ struct sk_buff * skb );
786
+ void mlx5e_free_rx_linear_mpwqe (struct mlx5e_rq * rq ,
787
+ struct mlx5e_mpw_info * wi );
788
+ void mlx5e_free_rx_fragmented_mpwqe (struct mlx5e_rq * rq ,
789
+ struct mlx5e_mpw_info * wi );
733
790
struct mlx5_cqe64 * mlx5e_get_cqe (struct mlx5e_cq * cq );
734
791
735
792
void mlx5e_update_stats (struct mlx5e_priv * priv );
@@ -763,7 +820,7 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
763
820
int num_channels );
764
821
765
822
static inline void mlx5e_tx_notify_hw (struct mlx5e_sq * sq ,
766
- struct mlx5e_tx_wqe * wqe , int bf_sz )
823
+ struct mlx5_wqe_ctrl_seg * ctrl , int bf_sz )
767
824
{
768
825
u16 ofst = MLX5_BF_OFFSET + sq -> bf_offset ;
769
826
@@ -777,9 +834,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
777
834
*/
778
835
wmb ();
779
836
if (bf_sz )
780
- __iowrite64_copy (sq -> uar_map + ofst , & wqe -> ctrl , bf_sz );
837
+ __iowrite64_copy (sq -> uar_map + ofst , ctrl , bf_sz );
781
838
else
782
- mlx5_write64 ((__be32 * )& wqe -> ctrl , sq -> uar_map + ofst , NULL );
839
+ mlx5_write64 ((__be32 * )ctrl , sq -> uar_map + ofst , NULL );
783
840
/* flush the write-combining mapped buffer */
784
841
wmb ();
785
842
@@ -800,6 +857,11 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
800
857
MLX5E_MAX_NUM_CHANNELS );
801
858
}
802
859
860
/* Number of MTT octwords (16-byte units, i.e. two 8-byte MTT entries)
 * needed to map @npages pages.  The page count is first rounded up to
 * a multiple of 8 entries, matching the alignment used by
 * MLX5_CHANNEL_MAX_NUM_MTTS.
 */
static inline int mlx5e_get_mtt_octw(int npages)
{
	return ALIGN(npages, 8) / 2;
}
864
+
803
865
extern const struct ethtool_ops mlx5e_ethtool_ops ;
804
866
#ifdef CONFIG_MLX5_CORE_EN_DCB
805
867
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops ;
0 commit comments