projects
/
openssl.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
ec/asm/ecp_nistz256-x86_64.pl: /cmovb/cmovc/ as nasm doesn't recognize cmovb.
[openssl.git]
/
crypto
/
ec
/
asm
/
ecp_nistz256-x86_64.pl
diff --git a/crypto/ec/asm/ecp_nistz256-x86_64.pl b/crypto/ec/asm/ecp_nistz256-x86_64.pl
index ddbbedf047fbfd91821cce91abc230f66834d574..16b6639b542f0da74148ff0270c770a0a8c72d1c 100755 (executable)
--- a/crypto/ec/asm/ecp_nistz256-x86_64.pl
+++ b/crypto/ec/asm/ecp_nistz256-x86_64.pl
@@ -156,12 +156,12 @@ ecp_nistz256_mul_by_2:
 	sbb	8*3($a_ptr), $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
 	mov	$a0, 8*0($r_ptr)
-	cmovb	$t2, $a2
+	cmovc	$t2, $a2
 	mov	$a1, 8*1($r_ptr)
-	cmovb	$t3, $a3
+	cmovc	$t3, $a3
 	mov	$a2, 8*2($r_ptr)
 	mov	$a3, 8*3($r_ptr)
@@ -260,10 +260,10 @@ ecp_nistz256_mul_by_3:
 	sbb	.Lpoly+8*3(%rip), $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
-	cmovb	$t2, $a2
-	cmovb	$t3, $a3
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
+	cmovc	$t2, $a2
+	cmovc	$t3, $a3

 	xor	$t4, $t4
 	add	8*0($a_ptr), $a0	# a0:a3+=a_ptr[0:3]
@@ -282,12 +282,12 @@ ecp_nistz256_mul_by_3:
 	sbb	.Lpoly+8*3(%rip), $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
 	mov	$a0, 8*0($r_ptr)
-	cmovb	$t2, $a2
+	cmovc	$t2, $a2
 	mov	$a1, 8*1($r_ptr)
-	cmovb	$t3, $a3
+	cmovc	$t3, $a3
 	mov	$a2, 8*2($r_ptr)
 	mov	$a3, 8*3($r_ptr)
@@ -328,12 +328,12 @@ ecp_nistz256_add:
 	sbb	8*3($a_ptr), $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
 	mov	$a0, 8*0($r_ptr)
-	cmovb	$t2, $a2
+	cmovc	$t2, $a2
 	mov	$a1, 8*1($r_ptr)
-	cmovb	$t3, $a3
+	cmovc	$t3, $a3
 	mov	$a2, 8*2($r_ptr)
 	mov	$a3, 8*3($r_ptr)
@@ -1908,12 +1908,12 @@ __ecp_nistz256_add_toq:
 	sbb	$poly3, $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
 	mov	$a0, 8*0($r_ptr)
-	cmovb	$t2, $a2
+	cmovc	$t2, $a2
 	mov	$a1, 8*1($r_ptr)
-	cmovb	$t3, $a3
+	cmovc	$t3, $a3
 	mov	$a2, 8*2($r_ptr)
 	mov	$a3, 8*3($r_ptr)
@@ -1998,12 +1998,12 @@ __ecp_nistz256_mul_by_2q:
 	sbb	$poly3, $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
 	mov	$a0, 8*0($r_ptr)
-	cmovb	$t2, $a2
+	cmovc	$t2, $a2
 	mov	$a1, 8*1($r_ptr)
-	cmovb	$t3, $a3
+	cmovc	$t3, $a3
 	mov	$a2, 8*2($r_ptr)
 	mov	$a3, 8*3($r_ptr)
@@ -2474,13 +2474,13 @@ $code.=<<___;
 	sbb	$poly3, $acc3
 	sbb	\$0, $t4

-	cmovb	$t0, $acc0
+	cmovc	$t0, $acc0
 	mov	8*0($a_ptr), $t0
-	cmovb	$t1, $acc1
+	cmovc	$t1, $acc1
 	mov	8*1($a_ptr), $t1
-	cmovb	$t2, $acc2
+	cmovc	$t2, $acc2
 	mov	8*2($a_ptr), $t2
-	cmovb	$t3, $acc3
+	cmovc	$t3, $acc3
 	mov	8*3($a_ptr), $t3

 	call	__ecp_nistz256_sub$x		# p256_sub(res_x, Rsqr, Hsqr);
@@ -2778,13 +2778,13 @@ $code.=<<___;
 	sbb	$poly3, $acc3
 	sbb	\$0, $t4

-	cmovb	$t0, $acc0
+	cmovc	$t0, $acc0
 	mov	8*0($a_ptr), $t0
-	cmovb	$t1, $acc1
+	cmovc	$t1, $acc1
 	mov	8*1($a_ptr), $t1
-	cmovb	$t2, $acc2
+	cmovc	$t2, $acc2
 	mov	8*2($a_ptr), $t2
-	cmovb	$t3, $acc3
+	cmovc	$t3, $acc3
 	mov	8*3($a_ptr), $t3

 	call	__ecp_nistz256_sub$x		# p256_sub(res_x, Rsqr, Hsqr);
@@ -2938,12 +2938,12 @@ __ecp_nistz256_add_tox:
 	sbb	$poly3, $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
 	mov	$a0, 8*0($r_ptr)
-	cmovb	$t2, $a2
+	cmovc	$t2, $a2
 	mov	$a1, 8*1($r_ptr)
-	cmovb	$t3, $a3
+	cmovc	$t3, $a3
 	mov	$a2, 8*2($r_ptr)
 	mov	$a3, 8*3($r_ptr)
@@ -3033,12 +3033,12 @@ __ecp_nistz256_mul_by_2x:
 	sbb	$poly3, $a3
 	sbb	\$0, $t4

-	cmovb	$t0, $a0
-	cmovb	$t1, $a1
+	cmovc	$t0, $a0
+	cmovc	$t1, $a1
 	mov	$a0, 8*0($r_ptr)
-	cmovb	$t2, $a2
+	cmovc	$t2, $a2
 	mov	$a1, 8*1($r_ptr)
-	cmovb	$t3, $a3
+	cmovc	$t3, $a3
 	mov	$a2, 8*2($r_ptr)
 	mov	$a3, 8*3($r_ptr)