Skip to content

Commit b1b5cb1

Browse files
rmchelsio authored and kuba-moo committed
ch_ktls: Correction in finding correct length
There is a possibility of linear skbs coming in, so correct the length extraction logic accordingly.

v2->v3:
- Separated unrelated changes from this patch.

Fixes: 5a4b9fe ("cxgb4/chcr: complete record tx handling")
Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent 9d2e5e9 commit b1b5cb1

File tree

1 file changed

+8
-7
lines changed
  • drivers/net/ethernet/chelsio/inline_crypto/ch_ktls

1 file changed

+8
-7
lines changed

drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -967,7 +967,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
967967
/* packet length = eth hdr len + ip hdr len + tcp hdr len
968968
* (including options).
969969
*/
970-
pktlen = skb->len - skb->data_len;
970+
pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
971971

972972
ctrl = sizeof(*cpl) + pktlen;
973973
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1860,23 +1860,26 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
18601860
/* nic tls TX handler */
18611861
static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
18621862
{
1863+
u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
18631864
struct ch_ktls_port_stats_debug *port_stats;
18641865
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
18651866
struct ch_ktls_stats_debug *stats;
18661867
struct tcphdr *th = tcp_hdr(skb);
18671868
int data_len, qidx, ret = 0, mss;
18681869
struct tls_record_info *record;
18691870
struct chcr_ktls_info *tx_info;
1870-
u32 tls_end_offset, tcp_seq;
18711871
struct tls_context *tls_ctx;
18721872
struct sk_buff *local_skb;
18731873
struct sge_eth_txq *q;
18741874
struct adapter *adap;
18751875
unsigned long flags;
18761876

18771877
tcp_seq = ntohl(th->seq);
1878+
skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
1879+
skb_data_len = skb->len - skb_offset;
1880+
data_len = skb_data_len;
18781881

1879-
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
1882+
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
18801883

18811884
tls_ctx = tls_get_ctx(skb->sk);
18821885
if (unlikely(tls_ctx->netdev != dev))
@@ -1922,8 +1925,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
19221925
/* copy skb contents into local skb */
19231926
chcr_ktls_skb_copy(skb, local_skb);
19241927

1925-
/* go through the skb and send only one record at a time. */
1926-
data_len = skb->data_len;
19271928
/* TCP segments can be in received either complete or partial.
19281929
* chcr_end_part_handler will handle cases if complete record or end
19291930
* part of the record is received. Incase of partial end part of record,
@@ -2020,9 +2021,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
20202021

20212022
} while (data_len > 0);
20222023

2023-
tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
2024+
tx_info->prev_seq = ntohl(th->seq) + skb_data_len;
20242025
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
2025-
atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
2026+
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
20262027

20272028
/* tcp finish is set, send a separate tcp msg including all the options
20282029
* as well.

0 commit comments

Comments (0)