author     Jakub Kicinski <kuba@kernel.org>       2022-04-08 11:31:34 -0700
committer  David S. Miller <davem@davemloft.net>  2022-04-10 17:32:12 +0100
commit     f940b6efb17257844341d04b4fc622752c23cb9f (patch)
tree       fdcd97ef1acda17a2d02f336d52feba54e800471 /net/tls
parent     b1a2c1786330286f4b31c4bb9fd1d5ac8bb09807 (diff)
tls: rx: jump out for cases which need to leave skb on list
The current inverse logic is harder to follow (and adds extra tests to the fast path). We have to enumerate all cases which need to keep the skb before consuming it. It's simpler to jump out of the full record flow as we detect those cases. This makes it clear that partial consumption and peek can only reach the end of the function through the !zc case, so move the code up there.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
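For illustration only (not part of the patch), a minimal standalone C sketch contrasting the two control-flow styles the message describes: the old retain_skb flag that every exit path has to test, versus jumping out as soon as a leave-on-list case is detected. The struct record layout and the consume_old()/consume_new() helpers are simplified stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the skb/strp_msg state used by tls_sw_recvmsg(). */
struct record {
	int offset;
	int full_len;
};

/* Old style: set a retain flag, then test every keep-on-list case at the end. */
static int consume_old(struct record *r, int len, bool async, bool is_peek)
{
	bool retain_skb = false;
	int chunk = r->full_len;

	if (chunk > len) {
		retain_skb = true;
		chunk = len;
	}
	if (!is_peek) {
		r->offset += chunk;
		r->full_len -= chunk;
	}
	if (!(async || is_peek || retain_skb))
		printf("unlink + consume record\n");
	else
		printf("leave record on list\n");
	return chunk;
}

/* New style: jump out (return early) the moment a leave-on-list case is hit. */
static int consume_new(struct record *r, int len, bool async, bool is_peek)
{
	int chunk = r->full_len;
	bool partially_consumed = chunk > len;

	if (async) {
		printf("leave record on list\n");   /* completion handles it later */
		return partially_consumed ? len : chunk;
	}
	if (partially_consumed)
		chunk = len;
	if (is_peek) {
		printf("leave record on list\n");   /* data must stay readable */
		return chunk;
	}
	if (partially_consumed) {
		r->offset += chunk;
		r->full_len -= chunk;
		printf("leave record on list\n");   /* rest is read on the next call */
		return chunk;
	}
	/* Only the fully-consumed, non-peek, non-async path reaches this point. */
	printf("unlink + consume record\n");
	return chunk;
}

int main(void)
{
	struct record a = { .offset = 0, .full_len = 100 };
	struct record b = { .offset = 0, .full_len = 100 };

	consume_old(&a, 40, false, false);  /* partial read: record stays queued */
	consume_new(&b, 40, false, false);  /* same outcome, but via an early exit */
	return 0;
}

After the change, any path that reaches the bottom of the loop body is guaranteed to have fully consumed the record, so the skb can be unlinked and freed unconditionally there.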
Diffstat (limited to 'net/tls')
-rw-r--r--  net/tls/tls_sw.c | 43
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 71d8082647c8..2e8a896af81a 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1773,7 +1773,6 @@ int tls_sw_recvmsg(struct sock *sk,
decrypted = 0;
while (len && (decrypted + copied < target || ctx->recv_pkt)) {
struct tls_decrypt_arg darg = {};
- bool retain_skb = false;
int to_decrypt, chunk;
skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
@@ -1833,12 +1832,17 @@ int tls_sw_recvmsg(struct sock *sk,
if (async) {
/* TLS 1.2-only, to_decrypt must be text length */
chunk = min_t(int, to_decrypt, len);
- goto pick_next_record;
+leave_on_list:
+ decrypted += chunk;
+ len -= chunk;
+ continue;
}
/* TLS 1.3 may have updated the length by more than overhead */
chunk = rxm->full_len;
if (!darg.zc) {
+ bool partially_consumed = chunk > len;
+
if (bpf_strp_enabled) {
err = sk_psock_tls_strp_read(psock, skb);
if (err != __SK_PASS) {
@@ -1851,39 +1855,36 @@ int tls_sw_recvmsg(struct sock *sk,
}
}
- if (chunk > len) {
- retain_skb = true;
+ if (partially_consumed)
chunk = len;
- }
err = skb_copy_datagram_msg(skb, rxm->offset,
msg, chunk);
if (err < 0)
goto recv_end;
- if (!is_peek) {
- rxm->offset = rxm->offset + chunk;
- rxm->full_len = rxm->full_len - chunk;
+ if (is_peek)
+ goto leave_on_list;
+
+ if (partially_consumed) {
+ rxm->offset += chunk;
+ rxm->full_len -= chunk;
+ goto leave_on_list;
}
}
-pick_next_record:
decrypted += chunk;
len -= chunk;
- /* For async or peek case, queue the current skb */
- if (!(async || is_peek || retain_skb)) {
- skb_unlink(skb, &ctx->rx_list);
- consume_skb(skb);
+ skb_unlink(skb, &ctx->rx_list);
+ consume_skb(skb);
- /* Return full control message to
- * userspace before trying to parse
- * another message type
- */
- msg->msg_flags |= MSG_EOR;
- if (control != TLS_RECORD_TYPE_DATA)
- goto recv_end;
- }
+ /* Return full control message to userspace before trying
+ * to parse another message type
+ */
+ msg->msg_flags |= MSG_EOR;
+ if (control != TLS_RECORD_TYPE_DATA)
+ break;
}
recv_end: