aes/asm/aesv8-armx.pl: avoid 32-bit lane assignment in CTR mode
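
In the old code each counter block register ($dat1, $dat2) received its updated
counter word through a 32-bit lane assignment (vmov.32 into lane 3), so the
registers consumed by aese/aesmc could be the destination of a partial-register
write. The new code performs the lane assignment only on $ivec and refreshes
the block registers with whole-register vorr copies.

A minimal sketch of the pattern (illustration only, not part of the patch;
AArch64 flavour, with v0/v1 and w6/w8 as hypothetical stand-ins for
$ivec/$dat1 and $ctr/$tctr1):

	// old pattern: the 32-bit lane insert lands directly in the block register
	add	w8, w6, #1		// next counter value
	rev	w8, w8			// byte-reverse the counter word
	mov	v1.s[3], w8		// partial write into the register aese reads

	// new pattern: lane insert only into the IV register, then a full copy
	add	w8, w6, #1
	rev	w8, w8
	mov	v0.s[3], w8		// lane write confined to the IV register
	mov	v1.16b, v0.16b		// whole-register copy into the block register
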
diff --git a/crypto/aes/asm/aesv8-armx.pl b/crypto/aes/asm/aesv8-armx.pl
index ee2e29823ac7a19af6a57bb874cda3cd2360a2c0..9532db70e259452ec1486d40f8f54cafd6737474 100755
--- a/crypto/aes/asm/aesv8-armx.pl
+++ b/crypto/aes/asm/aesv8-armx.pl
@@ -1797,17 +1797,17 @@ $code.=<<___;
 #ifndef __ARMEB__
        rev             $ctr, $ctr
 #endif
-       vorr            $dat1,$dat0,$dat0
        add             $tctr1, $ctr, #1
-       vorr            $dat2,$dat0,$dat0
-       add             $ctr, $ctr, #2
        vorr            $ivec,$dat0,$dat0
        rev             $tctr1, $tctr1
-       vmov.32         ${dat1}[3],$tctr1
+       vmov.32         ${ivec}[3],$tctr1
+       add             $ctr, $ctr, #2
+       vorr            $dat1,$ivec,$ivec
        b.ls            .Lctr32_tail
        rev             $tctr2, $ctr
+       vmov.32         ${ivec}[3],$tctr2
        sub             $len,$len,#3            // bias
-       vmov.32         ${dat2}[3],$tctr2
+       vorr            $dat2,$ivec,$ivec
 ___
 $code.=<<___   if ($flavour =~ /64/);
        cmp             $len,#2
@@ -2003,11 +2003,11 @@ $code.=<<___;
        aese            $dat1,q8
        aesmc           $tmp1,$dat1
         vld1.8         {$in0},[$inp],#16
-        vorr           $dat0,$ivec,$ivec
+        add            $tctr0,$ctr,#1
        aese            $dat2,q8
        aesmc           $dat2,$dat2
         vld1.8         {$in1},[$inp],#16
-        vorr           $dat1,$ivec,$ivec
+        rev            $tctr0,$tctr0
        aese            $tmp0,q9
        aesmc           $tmp0,$tmp0
        aese            $tmp1,q9
@@ -2016,8 +2016,6 @@ $code.=<<___;
         mov            $key_,$key
        aese            $dat2,q9
        aesmc           $tmp2,$dat2
-        vorr           $dat2,$ivec,$ivec
-        add            $tctr0,$ctr,#1
        aese            $tmp0,q12
        aesmc           $tmp0,$tmp0
        aese            $tmp1,q12
@@ -2033,20 +2031,22 @@ $code.=<<___;
        aese            $tmp1,q13
        aesmc           $tmp1,$tmp1
         veor           $in2,$in2,$rndlast
-        rev            $tctr0,$tctr0
+        vmov.32        ${ivec}[3], $tctr0
        aese            $tmp2,q13
        aesmc           $tmp2,$tmp2
-        vmov.32        ${dat0}[3], $tctr0
+        vorr           $dat0,$ivec,$ivec
         rev            $tctr1,$tctr1
        aese            $tmp0,q14
        aesmc           $tmp0,$tmp0
+        vmov.32        ${ivec}[3], $tctr1
+        rev            $tctr2,$ctr
        aese            $tmp1,q14
        aesmc           $tmp1,$tmp1
-        vmov.32        ${dat1}[3], $tctr1
-        rev            $tctr2,$ctr
+        vorr           $dat1,$ivec,$ivec
+        vmov.32        ${ivec}[3], $tctr2
        aese            $tmp2,q14
        aesmc           $tmp2,$tmp2
-        vmov.32        ${dat2}[3], $tctr2
+        vorr           $dat2,$ivec,$ivec
         subs           $len,$len,#3
        aese            $tmp0,q15
        aese            $tmp1,q15