projects
/
openssl.git
/ commitdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
| commitdiff |
tree
raw
|
patch
|
inline
| side by side (parent:
3739a77
)
Alpha assembler pack: adapt for Linux.
author
Andy Polyakov
<appro@openssl.org>
Mon, 13 Sep 2010 13:28:52 +0000
(13:28 +0000)
committer
Andy Polyakov
<appro@openssl.org>
Mon, 13 Sep 2010 13:28:52 +0000
(13:28 +0000)
PR: 2335
crypto/alphacpuid.pl
patch
|
blob
|
history
crypto/bn/asm/alpha-mont.pl
patch
|
blob
|
history
crypto/modes/asm/ghash-alpha.pl
patch
|
blob
|
history
crypto/sha/asm/sha1-alpha.pl
patch
|
blob
|
history
diff --git
a/crypto/alphacpuid.pl
b/crypto/alphacpuid.pl
index 57a473b0df93c1931a071a2c0d4f5906db91df22..c9474ff497521ed7e42c149ce73d80ba969b5c7f 100644
(file)
--- a/
crypto/alphacpuid.pl
+++ b/
crypto/alphacpuid.pl
@@
-70,9
+70,9
@@
OPENSSL_wipe_cpu:
OPENSSL_atomic_add:
.frame $30,0,$26
.prologue 0
OPENSSL_atomic_add:
.frame $30,0,$26
.prologue 0
-1: ldl_l $0,($16)
+1:	ldl_l	$0,0($16)
addl $0,$17,$1
addl $0,$17,$1
- stl_c $1,($16)
+	stl_c	$1,0($16)
beq $1,1b
addl $0,$17,$0
ret ($26)
beq $1,1b
addl $0,$17,$0
ret ($26)
diff --git
a/crypto/bn/asm/alpha-mont.pl
b/crypto/bn/asm/alpha-mont.pl
index f7e0ca1646cd3caad91ec442aee62fc3031bd039..c63458e94f8b8a35bbc606e3d7b61b4836b2aa19 100644
(file)
--- a/
crypto/bn/asm/alpha-mont.pl
+++ b/
crypto/bn/asm/alpha-mont.pl
@@
-41,8
+41,12
@@
$j="s4";
$m1="s5";
$code=<<___;
$m1="s5";
$code=<<___;
+#ifdef __linux__
+#include <asm/regdef.h>
+#else
#include <asm.h>
#include <regdef.h>
#include <asm.h>
#include <regdef.h>
+#endif
.text
.text
@@
-76,7
+80,7
@@
bn_mul_mont:
ldq $aj,8($ap)
subq sp,AT,sp
ldq $bi,0($bp) # bp[0]
ldq $aj,8($ap)
subq sp,AT,sp
ldq $bi,0($bp) # bp[0]
-	mov	-4096,AT
+	lda	AT,-4096(zero)	# mov	-4096,AT
ldq $n0,0($n0)
and sp,AT,sp
ldq $n0,0($n0)
and sp,AT,sp
@@
-106,9
+110,9
@@
bn_mul_mont:
.align 4
.L1st:
.set noreorder
.align 4
.L1st:
.set noreorder
- ldq $aj,($aj)
+	ldq	$aj,0($aj)
addl $j,1,$j
addl $j,1,$j
- ldq $nj,($nj)
+	ldq	$nj,0($nj)
lda $tp,8($tp)
addq $alo,$hi0,$lo0
lda $tp,8($tp)
addq $alo,$hi0,$lo0
@@
-159,12
+163,12
@@
bn_mul_mont:
.align 4
.Louter:
s8addq $i,$bp,$bi
.align 4
.Louter:
s8addq $i,$bp,$bi
- ldq $hi0,($ap)
+	ldq	$hi0,0($ap)
ldq $aj,8($ap)
ldq $aj,8($ap)
- ldq $bi,($bi)
- ldq $hi1,($np)
+	ldq	$bi,0($bi)
+	ldq	$hi1,0($np)
ldq $nj,8($np)
ldq $nj,8($np)
- ldq $tj,(sp)
+	ldq	$tj,0(sp)
mulq $hi0,$bi,$lo0
umulh $hi0,$bi,$hi0
mulq $hi0,$bi,$lo0
umulh $hi0,$bi,$hi0
@@
-195,10
+199,10
@@
bn_mul_mont:
.set noreorder
ldq $tj,8($tp) #L0
nop #U1
.set noreorder
ldq $tj,8($tp) #L0
nop #U1
-	ldq	$aj,($aj)	#L1
+	ldq	$aj,0($aj)	#L1
s8addq $j,$np,$nj #U0
s8addq $j,$np,$nj #U0
-	ldq	$nj,($nj)	#L0
+	ldq	$nj,0($nj)	#L0
nop #U1
addq $alo,$hi0,$lo0 #L1
lda $tp,8($tp)
nop #U1
addq $alo,$hi0,$lo0 #L1
lda $tp,8($tp)
@@
-247,7
+251,7
@@
bn_mul_mont:
addq $hi1,v0,$hi1
addq $hi1,$hi0,$lo1
addq $hi1,v0,$hi1
addq $hi1,$hi0,$lo1
- stq $j,($tp)
+	stq	$j,0($tp)
cmpult $lo1,$hi0,$hi1
addq $lo1,$tj,$lo1
cmpult $lo1,$tj,AT
cmpult $lo1,$hi0,$hi1
addq $lo1,$tj,$lo1
cmpult $lo1,$tj,AT
@@
-265,8
+269,8
@@
bn_mul_mont:
mov 0,$hi0 # clear borrow bit
.align 4
mov 0,$hi0 # clear borrow bit
.align 4
-.Lsub: ldq $lo0,($tp)
- ldq $lo1,($np)
+.Lsub:	ldq	$lo0,0($tp)
+	ldq	$lo1,0($np)
lda $tp,8($tp)
lda $np,8($np)
subq $lo0,$lo1,$lo1 # tp[i]-np[i]
lda $tp,8($tp)
lda $np,8($np)
subq $lo0,$lo1,$lo1 # tp[i]-np[i]
@@
-274,7
+278,7
@@
bn_mul_mont:
subq $lo1,$hi0,$lo0
cmpult $lo1,$lo0,$hi0
or $hi0,AT,$hi0
subq $lo1,$hi0,$lo0
cmpult $lo1,$lo0,$hi0
or $hi0,AT,$hi0
- stq $lo0,($rp)
+	stq	$lo0,0($rp)
cmpult $tp,$tj,v0
lda $rp,8($rp)
bne v0,.Lsub
cmpult $tp,$tj,v0
lda $rp,8($rp)
bne v0,.Lsub
@@
-288,7
+292,7
@@
bn_mul_mont:
bis $bp,$ap,$ap # ap=borrow?tp:rp
.align 4
bis $bp,$ap,$ap # ap=borrow?tp:rp
.align 4
-.Lcopy:	ldq	$aj,($ap)	# copy or in-place refresh
+.Lcopy:	ldq	$aj,0($ap)	# copy or in-place refresh
lda $tp,8($tp)
lda $rp,8($rp)
lda $ap,8($ap)
lda $tp,8($tp)
lda $rp,8($rp)
lda $ap,8($ap)
@@
-309,8
+313,8
@@
bn_mul_mont:
lda sp,48(sp)
ret (ra)
.end bn_mul_mont
lda sp,48(sp)
ret (ra)
.end bn_mul_mont
-.rdata
-.asciiz	"Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
+.ascii	"Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
___
print $code;
___
print $code;
diff --git
a/crypto/modes/asm/ghash-alpha.pl
b/crypto/modes/asm/ghash-alpha.pl
index be3c7ef5c0226c959344a9990f56c43a0c682adf..6358b2750fabf54c0c96b8103fbd2057ae651ad0 100644
(file)
--- a/
crypto/modes/asm/ghash-alpha.pl
+++ b/
crypto/modes/asm/ghash-alpha.pl
@@
-245,8
+245,12
@@
___
}}
$code=<<___;
}}
$code=<<___;
+#ifdef __linux__
+#include <asm/regdef.h>
+#else
#include <asm.h>
#include <regdef.h>
#include <asm.h>
#include <regdef.h>
+#endif
.text
.text
@@
-437,7
+441,7
@@
rem_4bit:
.quad 0x7080<<48, 0x6CA0<<48, 0x48C0<<48, 0x54E0<<48
.quad 0xE100<<48, 0xFD20<<48, 0xD940<<48, 0xC560<<48
.quad 0x9180<<48, 0x8DA0<<48, 0xA9C0<<48, 0xB5E0<<48
.quad 0x7080<<48, 0x6CA0<<48, 0x48C0<<48, 0x54E0<<48
.quad 0xE100<<48, 0xFD20<<48, 0xD940<<48, 0xC560<<48
.quad 0x9180<<48, 0x8DA0<<48, 0xA9C0<<48, 0xB5E0<<48
-.asciiz	"GHASH for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
+.ascii "GHASH for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
.align 4
___
diff --git
a/crypto/sha/asm/sha1-alpha.pl
b/crypto/sha/asm/sha1-alpha.pl
index 9d1e9435acd6a39fa275ba0aa28cf780a156a336..6c4b9251fd4ed69ba87f552a420f3b3bf2505a3d 100644
(file)
--- a/
crypto/sha/asm/sha1-alpha.pl
+++ b/
crypto/sha/asm/sha1-alpha.pl
@@
-225,8
+225,12
@@
___
}
$code=<<___;
}
$code=<<___;
+#ifdef __linux__
+#include <asm/regdef.h>
+#else
#include <asm.h>
#include <regdef.h>
#include <asm.h>
#include <regdef.h>
+#endif
.text
.text
@@
-310,6
+314,8
@@
$code.=<<___;
lda sp,64(sp)
ret (ra)
.end sha1_block_data_order
lda sp,64(sp)
ret (ra)
.end sha1_block_data_order
+.ascii "SHA1 block transform for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
___
$output=shift and open STDOUT,">$output";
print $code;
___
$output=shift and open STDOUT,">$output";
print $code;