git: bfd509e5fbad - 2024Q1 - security/openssl-quictls: Security fix for POLY1305 on ppc

From: Bernard Spil <brnrd@FreeBSD.org>
Date: Wed, 31 Jan 2024 19:52:01 UTC
The branch 2024Q1 has been updated by brnrd:

URL: https://cgit.FreeBSD.org/ports/commit/?id=bfd509e5fbad240b89f1d3bca1191b90afa574e9

commit bfd509e5fbad240b89f1d3bca1191b90afa574e9
Author:     Bernard Spil <brnrd@FreeBSD.org>
AuthorDate: 2024-01-11 12:48:08 +0000
Commit:     Bernard Spil <brnrd@FreeBSD.org>
CommitDate: 2024-01-31 19:51:43 +0000

    security/openssl-quictls: Security fix for POLY1305 on ppc
    
    Security:       8337251b-b07b-11ee-b0d7-84a93843eb75
    MFH:            2024Q1
    (cherry picked from commit f7b0dd64e0dc5bf46e5250a63a4f8e5a678e1b13)
---
 security/openssl-quictls/Makefile                  |   2 +-
 security/openssl-quictls/files/patch-CVE-2023-6129 | 109 +++++++++++++++++++++
 2 files changed, 110 insertions(+), 1 deletion(-)

diff --git a/security/openssl-quictls/Makefile b/security/openssl-quictls/Makefile
index f602baf253bf..6234907e0b2d 100644
--- a/security/openssl-quictls/Makefile
+++ b/security/openssl-quictls/Makefile
@@ -1,6 +1,6 @@
 PORTNAME=	openssl
 PORTVERSION=	3.0.12
-PORTREVISION=	1
+PORTREVISION=	2
 CATEGORIES=	security devel
 PKGNAMESUFFIX=	-quictls
 
diff --git a/security/openssl-quictls/files/patch-CVE-2023-6129 b/security/openssl-quictls/files/patch-CVE-2023-6129
new file mode 100644
index 000000000000..9e42dbb683a5
--- /dev/null
+++ b/security/openssl-quictls/files/patch-CVE-2023-6129
@@ -0,0 +1,109 @@
+From 050d26383d4e264966fb83428e72d5d48f402d35 Mon Sep 17 00:00:00 2001
+From: Rohan McLure <rmclure@linux.ibm.com>
+Date: Thu, 4 Jan 2024 10:25:50 +0100
+Subject: [PATCH] poly1305-ppc.pl: Fix vector register clobbering
+
+Fixes CVE-2023-6129
+
+The POLY1305 MAC (message authentication code) implementation in OpenSSL for
+PowerPC CPUs saves the contents of vector registers in a different order
+than they are restored. Thus the contents of some of these vector registers
+are corrupted when returning to the caller. The vulnerable code is used only
+on newer PowerPC processors supporting the PowerISA 2.07 instructions.
+
+Reviewed-by: Matt Caswell <matt@openssl.org>
+Reviewed-by: Richard Levitte <levitte@openssl.org>
+Reviewed-by: Tomas Mraz <tomas@openssl.org>
+(Merged from https://github.com/openssl/openssl/pull/23200)
+
+(cherry picked from commit 8d847a3ffd4f0b17ee33962cf69c36224925b34f)
+---
+ crypto/poly1305/asm/poly1305-ppc.pl | 42 ++++++++++++++---------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
+index 9f86134d923fb..2e601bb9c24be 100755
+--- crypto/poly1305/asm/poly1305-ppc.pl.orig
++++ crypto/poly1305/asm/poly1305-ppc.pl
+@@ -744,7 +744,7 @@
+ my $LOCALS= 6*$SIZE_T;
+ my $VSXFRAME = $LOCALS + 6*$SIZE_T;
+    $VSXFRAME += 128;	# local variables
+-   $VSXFRAME += 13*16;	# v20-v31 offload
++   $VSXFRAME += 12*16;	# v20-v31 offload
+ 
+ my $BIG_ENDIAN = ($flavour !~ /le/) ? 4 : 0;
+ 
+@@ -919,12 +919,12 @@
+ 	addi	r11,r11,32
+ 	stvx	v22,r10,$sp
+ 	addi	r10,r10,32
+-	stvx	v23,r10,$sp
+-	addi	r10,r10,32
+-	stvx	v24,r11,$sp
++	stvx	v23,r11,$sp
+ 	addi	r11,r11,32
+-	stvx	v25,r10,$sp
++	stvx	v24,r10,$sp
+ 	addi	r10,r10,32
++	stvx	v25,r11,$sp
++	addi	r11,r11,32
+ 	stvx	v26,r10,$sp
+ 	addi	r10,r10,32
+ 	stvx	v27,r11,$sp
+@@ -1153,12 +1153,12 @@
+ 	addi	r11,r11,32
+ 	stvx	v22,r10,$sp
+ 	addi	r10,r10,32
+-	stvx	v23,r10,$sp
+-	addi	r10,r10,32
+-	stvx	v24,r11,$sp
++	stvx	v23,r11,$sp
+ 	addi	r11,r11,32
+-	stvx	v25,r10,$sp
++	stvx	v24,r10,$sp
+ 	addi	r10,r10,32
++	stvx	v25,r11,$sp
++	addi	r11,r11,32
+ 	stvx	v26,r10,$sp
+ 	addi	r10,r10,32
+ 	stvx	v27,r11,$sp
+@@ -1899,26 +1899,26 @@
+ 	mtspr	256,r12				# restore vrsave
+ 	lvx	v20,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v21,r10,$sp
+-	addi	r10,r10,32
+-	lvx	v22,r11,$sp
++	lvx	v21,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v23,r10,$sp
++	lvx	v22,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v24,r11,$sp
++	lvx	v23,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v25,r10,$sp
++	lvx	v24,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v26,r11,$sp
++	lvx	v25,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v27,r10,$sp
++	lvx	v26,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v28,r11,$sp
++	lvx	v27,r11,$sp
+ 	addi	r11,r11,32
+-	lvx	v29,r10,$sp
++	lvx	v28,r10,$sp
+ 	addi	r10,r10,32
+-	lvx	v30,r11,$sp
+-	lvx	v31,r10,$sp
++	lvx	v29,r11,$sp
++	addi	r11,r11,32
++	lvx	v30,r10,$sp
++	lvx	v31,r11,$sp
+ 	$POP	r27,`$VSXFRAME-$SIZE_T*5`($sp)
+ 	$POP	r28,`$VSXFRAME-$SIZE_T*4`($sp)
+ 	$POP	r29,`$VSXFRAME-$SIZE_T*3`($sp)