@@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
         return netdev_mc_count(netdev);
 }
 
-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+                                bool *first_flag, bool *insert_empty)
 {
+        struct igc_adapter *adapter = netdev_priv(ring->netdev);
         ktime_t cycle_time = adapter->cycle_time;
         ktime_t base_time = adapter->base_time;
+        ktime_t now = ktime_get_clocktai();
+        ktime_t baset_est, end_of_cycle;
         u32 launchtime;
+        s64 n;
 
-        /* FIXME: when using ETF together with taprio, we may have a
-         * case where 'delta' is larger than the cycle_time, this may
-         * cause problems if we don't read the current value of
-         * IGC_BASET, as the value writen into the launchtime
-         * descriptor field may be misinterpreted.
+        n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+
+        baset_est = ktime_add_ns(base_time, cycle_time * (n));
+        end_of_cycle = ktime_add_ns(baset_est, cycle_time);
+
+        if (ktime_compare(txtime, end_of_cycle) >= 0) {
+                if (baset_est != ring->last_ff_cycle) {
+                        *first_flag = true;
+                        ring->last_ff_cycle = baset_est;
+
+                        if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+                                *insert_empty = true;
+                }
+        }
+
+        /* Introduce a window at the end of the cycle during which packets
+         * may not honor their launchtime. A 5 usec window is chosen to
+         * account for the software updating the tail pointer and for the
+         * packets being DMA'ed into the packet buffer.
          */
-        div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+        if (ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)
+                netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
+                            txtime);
+
+        ring->last_tx_cycle = end_of_cycle;
+
+        launchtime = ktime_sub_ns(txtime, baset_est);
+        if (launchtime > 0)
+                div_s64_rem(launchtime, cycle_time, &launchtime);
+        else
+                launchtime = 0;
 
         return cpu_to_le32(launchtime);
 }
 
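The reduction above replaces the old "txtime - base_time mod cycle_time" with an
offset relative to the estimated start of the current cycle (baset_est), derived
from the TAI clock. A minimal standalone C sketch of the same arithmetic, using
hypothetical numbers (not driver code):

        /* Hypothetical walk-through of the launchtime reduction, in ns */
        #include <stdio.h>

        int main(void)
        {
                long long base_time = 0;
                long long cycle_time = 1000000;                   /* 1 ms Qbv cycle */
                long long now = 5300000;                          /* current TAI time */
                long long txtime = 6400000;                       /* requested launch time */
                long long n = (now - base_time) / cycle_time;     /* 5 cycles elapsed */
                long long baset_est = base_time + cycle_time * n; /* 5000000 */
                long long end_of_cycle = baset_est + cycle_time;  /* 6000000 */
                long long launchtime = txtime - baset_est;        /* 1400000 */

                if (launchtime > 0)
                        launchtime %= cycle_time;                 /* 400000 */
                else
                        launchtime = 0;

                /* txtime >= end_of_cycle, so the driver would also raise
                 * first_flag once for this cycle.
                 */
                printf("first_flag=%d launchtime=%lld\n",
                       txtime >= end_of_cycle, launchtime);       /* 1 400000 */
                return 0;
        }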
+static int igc_init_empty_frame(struct igc_ring *ring,
+                                struct igc_tx_buffer *buffer,
+                                struct sk_buff *skb)
+{
+        unsigned int size;
+        dma_addr_t dma;
+
+        size = skb_headlen(skb);
+
+        dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
+        if (dma_mapping_error(ring->dev, dma)) {
+                netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+                return -ENOMEM;
+        }
+
+        buffer->skb = skb;
+        buffer->protocol = 0;
+        buffer->bytecount = skb->len;
+        buffer->gso_segs = 1;
+        buffer->time_stamp = jiffies;
+        dma_unmap_len_set(buffer, len, skb->len);
+        dma_unmap_addr_set(buffer, dma, dma);
+
+        return 0;
+}
+
+static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
+                                        struct sk_buff *skb,
+                                        struct igc_tx_buffer *first)
+{
+        union igc_adv_tx_desc *desc;
+        u32 cmd_type, olinfo_status;
+        int err;
+
+        if (!igc_desc_unused(ring))
+                return -EBUSY;
+
+        err = igc_init_empty_frame(ring, first, skb);
+        if (err)
+                return err;
+
+        cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+                   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+                   first->bytecount;
+        olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+        desc = IGC_TX_DESC(ring, ring->next_to_use);
+        desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+        desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+        desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
+
+        netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+        first->next_to_watch = desc;
+
+        ring->next_to_use++;
+        if (ring->next_to_use == ring->count)
+                ring->next_to_use = 0;
+
+        return 0;
+}
+
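+/* Minimum Ethernet frame length (64 bytes on the wire, less the 4-byte FCS) */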
+#define IGC_EMPTY_FRAME_SIZE        60
+
 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
-                            struct igc_tx_buffer *first,
+                            __le32 launch_time, bool first_flag,
                             u32 vlan_macip_lens, u32 type_tucmd,
                             u32 mss_l4len_idx)
 {
@@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
         if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
                 mss_l4len_idx |= tx_ring->reg_idx << 4;
 
+        if (first_flag)
+                mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
         context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
         context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
         context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-        /* We assume there is always a valid Tx time available. Invalid times
-         * should have been handled by the upper layers.
-         */
-        if (tx_ring->launchtime_enable) {
-                struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
-                ktime_t txtime = first->skb->tstamp;
-
-                skb_txtime_consumed(first->skb);
-                context_desc->launch_time = igc_tx_launchtime(adapter,
-                                                              txtime);
-        } else {
-                context_desc->launch_time = 0;
-        }
+        context_desc->launch_time = launch_time;
 }
 
-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+                        __le32 launch_time, bool first_flag)
 {
         struct sk_buff *skb = first->skb;
         u32 vlan_macip_lens = 0;
@@ -1096,7 +1180,8 @@ static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
         vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
         vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-        igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
+        igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+                        vlan_macip_lens, type_tucmd, 0);
 }
 
 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
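With this change igc_tx_ctxtdesc() no longer derives the launch time from
skb->tstamp itself; igc_xmit_frame_ring() computes it once and passes it down
through igc_tx_csum() and igc_tso(). That is what allows the context descriptor
written for the injected empty packet to carry a launch time of zero while the
real packet's descriptor carries the computed cycle offset.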
@@ -1320,6 +1405,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 
 static int igc_tso(struct igc_ring *tx_ring,
                    struct igc_tx_buffer *first,
+                   __le32 launch_time, bool first_flag,
                    u8 *hdr_len)
 {
         u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
@@ -1406,20 +1492,23 @@ static int igc_tso(struct igc_ring *tx_ring,
         vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
         vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-        igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
-                        type_tucmd, mss_l4len_idx);
+        igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+                        vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
         return 1;
 }
 
 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
                                        struct igc_ring *tx_ring)
 {
+        bool first_flag = false, insert_empty = false;
         u16 count = TXD_USE_COUNT(skb_headlen(skb));
         __be16 protocol = vlan_get_protocol(skb);
         struct igc_tx_buffer *first;
+        __le32 launch_time = 0;
         u32 tx_flags = 0;
         unsigned short f;
+        ktime_t txtime;
         u8 hdr_len = 0;
         int tso = 0;
@@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
                 count += TXD_USE_COUNT(skb_frag_size(
                                                 &skb_shinfo(skb)->frags[f]));
 
-        if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+        if (igc_maybe_stop_tx(tx_ring, count + 5)) {
                 /* this is a hard error */
                 return NETDEV_TX_BUSY;
         }
 
+        if (!tx_ring->launchtime_enable)
+                goto done;
+
+        txtime = skb->tstamp;
+        skb->tstamp = ktime_set(0, 0);
+        launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
+
+        if (insert_empty) {
+                struct igc_tx_buffer *empty_info;
+                struct sk_buff *empty;
+                void *data;
+
+                empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+                empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+                if (!empty)
+                        goto done;
+
+                data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
+                memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+                igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+                if (igc_init_tx_empty_descriptor(tx_ring,
+                                                 empty,
+                                                 empty_info) < 0)
+                        dev_kfree_skb_any(empty);
+        }
+
+done:
         /* record the location of the first descriptor for this packet */
         first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
         first->type = IGC_TX_BUFFER_TYPE_SKB;
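The descriptor reservation grows from count + 3 to count + 5 because the
empty-packet path above may consume two additional descriptors ahead of the
real frame: one context descriptor (written via igc_tx_ctxtdesc() with a launch
time of zero) and one data descriptor for the 60-byte empty frame.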
@@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
         first->tx_flags = tx_flags;
         first->protocol = protocol;
 
-        tso = igc_tso(tx_ring, first, &hdr_len);
+        tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
         if (tso < 0)
                 goto out_drop;
         else if (!tso)
-                igc_tx_csum(tx_ring, first, launch_time, first_flag);
+                igc_tx_csum(tx_ring, first, launch_time, first_flag);
 
         igc_tx_map(tx_ring, first, hdr_len);
 
@@ -5925,10 +6043,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
         bool queue_configured[IGC_MAX_TX_QUEUES] = { };
         u32 start_time = 0, end_time = 0;
         size_t n;
+        int i;
+
+        adapter->qbv_enable = qopt->enable;
 
         if (!qopt->enable)
                 return igc_tsn_clear_schedule(adapter);
 
+        if (qopt->base_time < 0)
+                return -ERANGE;
+
         if (adapter->base_time)
                 return -EALREADY;
 
@@ -5940,10 +6064,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 
         for (n = 0; n < qopt->num_entries; n++) {
                 struct tc_taprio_sched_entry *e = &qopt->entries[n];
-                int i;
 
                 end_time += e->interval;
 
+                /* If any of the conditions below are true, we need to manually
+                 * control the end time of the cycle.
+                 * 1. Qbv users can specify a cycle time that is not equal
+                 *    to the total GCL intervals. Hence, recalculation is
+                 *    necessary here to exclude the time interval that
+                 *    exceeds the cycle time.
+                 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
+                 *    once the end of the list is reached, it will switch
+                 *    to the END_OF_CYCLE state and leave the gates in the
+                 *    same state until the next cycle is started.
+                 */
+                if (end_time > adapter->cycle_time ||
+                    n + 1 == qopt->num_entries)
+                        end_time = adapter->cycle_time;
+
                 for (i = 0; i < adapter->num_tx_queues; i++) {
                         struct igc_ring *ring = adapter->tx_ring[i];
 
@@ -5964,6 +6102,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
                 start_time += e->interval;
         }
 
+        /* Check whether a queue gets configured.
+         * If not, set the start and end time to be end time.
+         */
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                if (!queue_configured[i]) {
+                        struct igc_ring *ring = adapter->tx_ring[i];
+
+                        ring->start_time = end_time;
+                        ring->end_time = end_time;
+                }
+        }
+
         return 0;
 }
 
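A worked example of the end_time clamping, with hypothetical numbers: given
cycle_time = 1 ms and three GCL entries of 400 us each, the running end_time
reaches 1.2 ms on the third entry; both conditions then hold (it exceeds the
cycle time and it is the last entry), so that entry is truncated to end at 1 ms
and the final gate state persists until the next cycle, matching the cited
END_OF_CYCLE behavior. Queues that never appear in any entry end up with
start_time == end_time, i.e. a zero-length window at the cycle boundary.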