git: 6b635c74fd41 - main - aesni: Push FPU sections down further

From: Mark Johnston <markj_at_FreeBSD.org>
Date: Tue, 29 Aug 2023 01:37:01 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=6b635c74fd4135eaae68970bfc5aad9ae905fec7

commit 6b635c74fd4135eaae68970bfc5aad9ae905fec7
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2023-08-29 01:26:53 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2023-08-29 01:26:53 +0000

    aesni: Push FPU sections down further
    
    After commit 937b4473be21, aesni_cipher_crypt() and aesni_cipher_mac()
    execute in an FPU_KERN_NOCTX section, which means that they must run with
    preemption disabled.  These functions handle discontiguous I/O buffers
    by allocating a contiguous buffer and copying as necessary, but this
    allocation cannot happen with preemption disabled.  Fix the problem by
    pushing the FPU section down into aesni_cipher_crypt() and
    aesni_cipher_mac().  In particular, encrypt-then-auth transforms need
    not be handled with a single FPU section.
    
    Reported by:    syzbot+78258dbb02eb92157357@syzkaller.appspotmail.com
    Discussed with: jhb
    Fixes:          937b4473be21 ("aesni: Switch to using FPU_KERN_NOCTX.")
---
 sys/crypto/aesni/aesni.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/sys/crypto/aesni/aesni.c b/sys/crypto/aesni/aesni.c
index 6d83743dece9..5d5afd8aee41 100644
--- a/sys/crypto/aesni/aesni.c
+++ b/sys/crypto/aesni/aesni.c
@@ -594,8 +594,6 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
 		break;
 	}
 
-	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
-
 	/* Do work */
 	if (csp->csp_mode == CSP_MODE_ETA) {
 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
@@ -612,7 +610,6 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
 	else
 		error = aesni_cipher_crypt(ses, crp, csp);
 
-	fpu_kern_leave(curthread, NULL);
 	return (error);
 }
 
@@ -677,6 +674,8 @@ aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
 		outcopy = allocated;
 	}
 
+	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
+
 	error = 0;
 	encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
 	if (crp->crp_cipher_key != NULL)
@@ -749,6 +748,9 @@ aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
 		}
 		break;
 	}
+
+	fpu_kern_leave(curthread, NULL);
+
 	if (outcopy && error == 0)
 		crypto_copyback(crp, CRYPTO_HAS_OUTPUT_BUFFER(crp) ?
 		    crp->crp_payload_output_start : crp->crp_payload_start,
@@ -784,6 +786,8 @@ aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
 		key = csp->csp_auth_key;
 	keylen = csp->csp_auth_klen;
 
+	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
+
 	if (ses->hmac) {
 		uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);
 
@@ -849,6 +853,8 @@ aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
 		ses->hash_finalize(res, &sctx);
 	}
 
+	fpu_kern_leave(curthread, NULL);
+
 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
 		uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)];