poly1305-ppc.pl: Fix vector register clobbering
author    Rohan McLure <rmclure@linux.ibm.com>
          Thu, 4 Jan 2024 09:25:50 +0000 (10:25 +0100)
committer Tomas Mraz <tomas@openssl.org>
          Tue, 9 Jan 2024 14:46:39 +0000 (15:46 +0100)
Fixes CVE-2023-6129

The POLY1305 MAC (message authentication code) implementation in OpenSSL for
PowerPC CPUs saves the contents of vector registers in a different order than
it restores them. Thus the contents of some of these vector registers are
corrupted when returning to the caller. The vulnerable code is used only
on newer PowerPC processors supporting the PowerISA 2.07 instructions.
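
For illustration only, a minimal C sketch of this class of bug; the names are
hypothetical stand-ins for the vector registers and the on-stack offload area:

    #include <stdio.h>

    int main(void) {
        long v23 = 23, v24 = 24, v25 = 25;   /* stand-ins for callee-saved registers */
        long frame[3];                       /* stand-in for the stack offload area  */

        /* "Save" the registers in one order ... */
        frame[0] = v23; frame[1] = v24; frame[2] = v25;

        /* ... and "restore" them in a different order: two registers now
           come back from slots that other registers were saved to. */
        v23 = frame[0]; v24 = frame[2]; v25 = frame[1];

        printf("%ld %ld %ld\n", v23, v24, v25);   /* prints "23 25 24" */
        return 0;
    }

Spilling values in one order and reloading them in another silently exchanges
register contents behind the caller's back, which is the effect the reordered
stvx/lvx sequences below eliminate.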

Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/23200)

(cherry picked from commit 8d847a3ffd4f0b17ee33962cf69c36224925b34f)

diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
index 9f86134d923fba4d37d82203c5706761b72af062..2e601bb9c24be73013d18e8982e4a65a5e4b92f3 100755
--- a/crypto/poly1305/asm/poly1305-ppc.pl
+++ b/crypto/poly1305/asm/poly1305-ppc.pl
@@ -744,7 +744,7 @@ ___
 my $LOCALS= 6*$SIZE_T;
 my $VSXFRAME = $LOCALS + 6*$SIZE_T;
    $VSXFRAME += 128;   # local variables
-   $VSXFRAME += 13*16; # v20-v31 offload
+   $VSXFRAME += 12*16; # v20-v31 offload
 
 my $BIG_ENDIAN = ($flavour !~ /le/) ? 4 : 0;
 
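For scale: v20 through v31 are twelve 16-byte Altivec registers, so a
correctly packed offload area needs 12*16 = 192 bytes. The old 13*16 figure
appears to have sized the area for the broken store interleaving below, in
which one base register ran a 32-byte step ahead of the other and so touched
a thirteenth 16-byte slot.
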
@@ -919,12 +919,12 @@ __poly1305_blocks_vsx:
        addi    r11,r11,32
        stvx    v22,r10,$sp
        addi    r10,r10,32
-       stvx    v23,r10,$sp
-       addi    r10,r10,32
-       stvx    v24,r11,$sp
+       stvx    v23,r11,$sp
        addi    r11,r11,32
-       stvx    v25,r10,$sp
+       stvx    v24,r10,$sp
        addi    r10,r10,32
+       stvx    v25,r11,$sp
+       addi    r11,r11,32
        stvx    v26,r10,$sp
        addi    r10,r10,32
        stvx    v27,r11,$sp
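
Both save sites interleave two base registers. Assuming r10 and r11 start
16 bytes apart and each advances by 32 after use (the idiom in the
surrounding perlasm), strict alternation packs the twelve registers into
contiguous 16-byte slots:

    v20: base+0     v21: base+16    v22: base+32    v23: base+48
    v24: base+64    v25: base+80    v26: base+96    v27: base+112
    v28: base+128   v29: base+144   v30: base+160   v31: base+176

The old code broke the alternation at v23 with two consecutive stores through
r10, shifting every later store out of this layout; this hunk, and the
identical one below for the second save site, restore strict r10/r11
alternation.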
@@ -1153,12 +1153,12 @@ __poly1305_blocks_vsx:
        addi    r11,r11,32
        stvx    v22,r10,$sp
        addi    r10,r10,32
-       stvx    v23,r10,$sp
-       addi    r10,r10,32
-       stvx    v24,r11,$sp
+       stvx    v23,r11,$sp
        addi    r11,r11,32
-       stvx    v25,r10,$sp
+       stvx    v24,r10,$sp
        addi    r10,r10,32
+       stvx    v25,r11,$sp
+       addi    r11,r11,32
        stvx    v26,r10,$sp
        addi    r10,r10,32
        stvx    v27,r11,$sp
@@ -1899,26 +1899,26 @@ Ldone_vsx:
        mtspr   256,r12                         # restore vrsave
        lvx     v20,r10,$sp
        addi    r10,r10,32
-       lvx     v21,r10,$sp
-       addi    r10,r10,32
-       lvx     v22,r11,$sp
+       lvx     v21,r11,$sp
        addi    r11,r11,32
-       lvx     v23,r10,$sp
+       lvx     v22,r10,$sp
        addi    r10,r10,32
-       lvx     v24,r11,$sp
+       lvx     v23,r11,$sp
        addi    r11,r11,32
-       lvx     v25,r10,$sp
+       lvx     v24,r10,$sp
        addi    r10,r10,32
-       lvx     v26,r11,$sp
+       lvx     v25,r11,$sp
        addi    r11,r11,32
-       lvx     v27,r10,$sp
+       lvx     v26,r10,$sp
        addi    r10,r10,32
-       lvx     v28,r11,$sp
+       lvx     v27,r11,$sp
        addi    r11,r11,32
-       lvx     v29,r10,$sp
+       lvx     v28,r10,$sp
        addi    r10,r10,32
-       lvx     v30,r11,$sp
-       lvx     v31,r10,$sp
+       lvx     v29,r11,$sp
+       addi    r11,r11,32
+       lvx     v30,r10,$sp
+       lvx     v31,r11,$sp
        $POP    r27,`$VSXFRAME-$SIZE_T*5`($sp)
        $POP    r28,`$VSXFRAME-$SIZE_T*4`($sp)
        $POP    r29,`$VSXFRAME-$SIZE_T*3`($sp)
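
With the fix, the restore walks the same alternation as the save, so each lvx
reloads exactly the slot its stvx wrote. Under the same 16-bytes-apart
assumption, the pre-fix save and restore disagreed on which of r10/r11
carried which register from v21 onward, so some registers came back from a
neighbour's slot, for example:

    old restore: v21 <- base+32, v22 <- base+16   (exchanged)
    new restore: v21 <- base+16, v22 <- base+32   (matches the save layout)

Since v20-v31 are non-volatile under the PowerPC ABIs, the caller's values in
those registers were silently corrupted on return.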