sha512-586.pl: fix typo.
[openssl.git] / crypto / sha / asm / sha512-586.pl
index 5554f022e99b777cfadeb739cee708e0096ddcbd..9fc792964f48c69286c53c335cc894c437c4492c 100644 (file)
@@ -1,7 +1,7 @@
 #!/usr/bin/env perl
 #
 # ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
 # project. The module is, however, dual licensed under OpenSSL and
 # CRYPTOGAMS licenses depending on where you obtain it. For further
 # details see http://www.openssl.org/~appro/cryptogams/.
@@ -9,17 +9,30 @@
 #
 # SHA512 block transform for x86. September 2007.
 #
+# May 2013.
+#
+# Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
+#
 # Performance in clock cycles per processed byte (less is better):
 #
-#              Pentium PIII    P4      AMD K8  Core2
-# gcc          100     75      116     54      66
-# icc          97      77      95      55      57
-# x86 asm      61      56      82      36      40
-# SSE2 asm     -       -       38      24      20
-# x86_64 asm(*)        -       -       30      10.0    10.5
+#              gcc     icc     x86 asm SIMD(*) x86_64(**)
+# Pentium      100     97      61      -       -
+# PIII         75      77      56      -       -
+# P4           116     95      82      34.6    30.8
+# AMD K8       54      55      36      20.7    9.57
+# Core2                66      57      40      15.9    9.97
+# Westmere     70      -       38      12.2    9.58
+# Sandy Bridge 58      -       35      11.9    11.2
+# Ivy Bridge   50      -       33      11.5    8.17
+# Haswell      46      -       29      11.3    7.66
+# Bulldozer    121     -       50      14.0    13.5
+# VIA Nano     91      -       52      33      14.7
+# Atom         126     -       68      48(***) 14.7
 #
-# (*) x86_64 assembler performance is presented for reference
-#     purposes.
+# (*)  whichever best applies.
+# (**) x86_64 assembler performance is presented for reference
+#      purposes; the results are for integer-only code.
+# (***)        paddq is incredibly slow on Atom.
 #
 # IALU code-path is optimized for elder Pentiums. On vanilla Pentium
 # performance improvement over compiler generated code reaches ~60%,
@@ -66,72 +79,77 @@ $Hsse2=&QWP(56,"esp");
 $A="mm0";      # B-D and
 $E="mm4";      # F-H are commonly loaded to respectively mm1-mm3 and
                # mm5-mm7, but it's done on an on-demand basis...
+$BxC="mm2";    # ... except for B^C
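+               # keeping b^c live across rounds is what drives the Maj()
+               # shortcut below: Maj(a,b,c) = ((b^c)&(a^b))^b, and this
+               # round's a^b becomes next round's b^c when registers rotate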
 
 sub BODY_00_15_sse2 {
-    my $prefetch=shift;
+    my $phase=shift;
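+       # $phase is 0/undef for rounds 0..14, 1 for round 15 and 2 for
+       # rounds 16..79, where a and b^c rotate between $A and $BxC at the
+       # tail of this sub instead of being copied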
 
-       &movq   ("mm5",$Fsse2);                 # load f
-       &movq   ("mm6",$Gsse2);                 # load g
-       &movq   ("mm7",$Hsse2);                 # load h
+       #&movq  ("mm5",$Fsse2);                 # load f
+       #&movq  ("mm6",$Gsse2);                 # load g
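+       # T1 = h + Sigma1(e) + Ch(e,f,g) + K512[i] + X[i], with
+       # Sigma1(e) = ROTR14^ROTR18^ROTR41, Sigma0(a) = ROTR28^ROTR34^ROTR39
+       # and Ch(e,f,g) = ((f^g)&e)^g; MMX has no 64-bit rotate, so each
+       # rotation is assembled from a right shift (%mm1, %mm5) paired with
+       # the complementary left shift ($E, %mm6)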
 
        &movq   ("mm1",$E);                     # %mm1 is sliding right
-       &movq   ("mm2",$E);                     # %mm2 is sliding left
+        &pxor  ("mm5","mm6");                  # f^=g
        &psrlq  ("mm1",14);
-       &movq   ($Esse2,$E);                    # modulo-scheduled save e
-       &psllq  ("mm2",23);
+        &movq  ($Esse2,$E);                    # modulo-scheduled save e
+        &pand  ("mm5",$E);                     # f&=e
+       &psllq  ($E,23);                        # $E is sliding left
+        &movq  ($A,"mm3")                      if ($phase<2);
+        &movq  (&QWP(8*9,"esp"),"mm7");        # save X[i]
        &movq   ("mm3","mm1");                  # %mm3 is T1
-       &psrlq  ("mm1",4);
-       &pxor   ("mm3","mm2");
-       &psllq  ("mm2",23);
+        &psrlq ("mm1",4);
+        &pxor  ("mm5","mm6");                  # Ch(e,f,g)
+       &pxor   ("mm3",$E);
+        &psllq ($E,23);
        &pxor   ("mm3","mm1");
-       &psrlq  ("mm1",23);
-       &pxor   ("mm3","mm2");
-       &psllq  ("mm2",4);
+        &movq  ($Asse2,$A);                    # modulo-scheduled save a
+        &paddq ("mm7","mm5");                  # X[i]+=Ch(e,f,g)
+       &pxor   ("mm3",$E);
+        &psrlq ("mm1",23);
+        &paddq ("mm7",$Hsse2);                 # X[i]+=h
        &pxor   ("mm3","mm1");
-       &paddq  ("mm7",QWP(0,$K512));           # h+=K512[i]
-       &pxor   ("mm3","mm2");                  # T1=Sigma1_512(e)
-
-       &pxor   ("mm5","mm6");                  # f^=g
+        &psllq ($E,4);
+        &paddq ("mm7",QWP(0,$K512));           # X[i]+=K512[i]
+       &pxor   ("mm3",$E);                     # T1=Sigma1_512(e)
+
+        &movq  ($E,$Dsse2);                    # e = load d, e in next round
+       &paddq  ("mm3","mm7");                  # T1+=X[i]
+        &movq  ("mm5",$A);                     # %mm5 is sliding right
+        &psrlq ("mm5",28);
+       &paddq  ($E,"mm3");                     # d += T1
+        &movq  ("mm6",$A);                     # %mm6 is sliding left
+        &movq  ("mm7","mm5");
+        &psllq ("mm6",25);
        &movq   ("mm1",$Bsse2);                 # load b
-       &pand   ("mm5",$E);                     # f&=e
-       &movq   ("mm2",$Csse2);                 # load c
-       &pxor   ("mm5","mm6");                  # f^=g
-       &movq   ($E,$Dsse2);                    # e = load d
-       &paddq  ("mm3","mm5");                  # T1+=Ch(e,f,g)
-       &movq   (&QWP(0,"esp"),$A);             # modulo-scheduled save a
-       &paddq  ("mm3","mm7");                  # T1+=h
-
-       &movq   ("mm5",$A);                     # %mm5 is sliding right
-       &movq   ("mm6",$A);                     # %mm6 is sliding left
-       &paddq  ("mm3",&QWP(8*9,"esp"));        # T1+=X[0]
-       &psrlq  ("mm5",28);
-       &paddq  ($E,"mm3");                     # e += T1
-       &psllq  ("mm6",25);
-       &movq   ("mm7","mm5");                  # %mm7 is T2
-       &psrlq  ("mm5",6);
-       &pxor   ("mm7","mm6");
-       &psllq  ("mm6",5);
-       &pxor   ("mm7","mm5");
-       &psrlq  ("mm5",5);
-       &pxor   ("mm7","mm6");
-       &psllq  ("mm6",6);
-       &pxor   ("mm7","mm5");
+        &psrlq ("mm5",6);
+        &pxor  ("mm7","mm6");
        &sub    ("esp",8);
-       &pxor   ("mm7","mm6");                  # T2=Sigma0_512(a)
-
-       &movq   ("mm5",$A);                     # %mm5=a
-       &por    ($A,"mm2");                     # a=a|c
-       &movq   ("mm6",&QWP(8*(9+16-14),"esp")) if ($prefetch);
-       &pand   ("mm5","mm2");                  # %mm5=a&c
-       &pand   ($A,"mm1");                     # a=(a|c)&b
-       &movq   ("mm2",&QWP(8*(9+16-1),"esp"))  if ($prefetch);
-       &por    ("mm5",$A);                     # %mm5=(a&c)|((a|c)&b)
-       &paddq  ("mm7","mm5");                  # T2+=Maj(a,b,c)
-       &movq   ($A,"mm3");                     # a=T1
-
-       &mov    (&LB("edx"),&BP(0,$K512));
-       &paddq  ($A,"mm7");                     # a+=T2
-       &add    ($K512,8);
+        &psllq ("mm6",5);
+        &pxor  ("mm7","mm5");
+       &pxor   ($A,"mm1");                     # a^b, b^c in next round
+        &psrlq ("mm5",5);
+        &pxor  ("mm7","mm6");
+       &pand   ($BxC,$A);                      # (b^c)&(a^b)
+        &psllq ("mm6",6);
+        &pxor  ("mm7","mm5");
+       &pxor   ($BxC,"mm1");                   # [h=]Maj(a,b,c)
+        &pxor  ("mm6","mm7");                  # Sigma0_512(a)
+        &movq  ("mm7",&QWP(8*(9+16-1),"esp"))  if ($phase!=0); # pre-fetch
+        &movq  ("mm5",$Fsse2)                  if ($phase==0); # load f
+
+    if ($phase>1) {
+       &paddq  ($BxC,"mm6");                   # h+=Sigma0(a)
+        &add   ($K512,8);
+       #&paddq ($BxC,"mm3");                   # h+=T1
+
+       ($A,$BxC) = ($BxC,$A);                  # rotate registers
+    } else {
+       &paddq  ("mm3",$BxC);                   # T1+=Maj(a,b,c)
+        &movq  ($BxC,$A);
+        &add   ($K512,8);
+       &paddq  ("mm3","mm6");                  # T1+=Sigma0(a)
+        &movq  ("mm6",$Gsse2)                  if ($phase==0); # load g
+       #&movq  ($A,"mm3");                     # h=T1
+    }
 }
 
 sub BODY_00_15_x86 {
@@ -142,9 +160,9 @@ sub BODY_00_15_x86 {
        &mov    ("edx",$Ehi);
        &mov    ("esi","ecx");
 
-       &shr    ("ecx",9)       # lo>>9
+       &shr    ("ecx",9);      # lo>>9
        &mov    ("edi","edx");
-       &shr    ("edx",9)       # hi>>9
+       &shr    ("edx",9);      # hi>>9
        &mov    ("ebx","ecx");
        &shl    ("esi",14);     # lo<<14
        &mov    ("eax","edx");
@@ -207,9 +225,9 @@ sub BODY_00_15_x86 {
        &mov    ($Dhi,"ebx");
        &mov    ("esi","ecx");
 
-       &shr    ("ecx",2)       # lo>>2
+       &shr    ("ecx",2);      # lo>>2
        &mov    ("edi","edx");
-       &shr    ("edx",2)       # hi>>2
+       &shr    ("edx",2);      # hi>>2
        &mov    ("ebx","ecx");
        &shl    ("esi",4);      # lo<<4
        &mov    ("eax","edx");
@@ -261,7 +279,7 @@ sub BODY_00_15_x86 {
 }
 
 
-&function_begin("sha512_block_data_order",16);
+&function_begin("sha512_block_data_order");
        &mov    ("esi",wparam(0));      # ctx
        &mov    ("edi",wparam(1));      # inp
        &mov    ("eax",wparam(2));      # num
@@ -284,110 +302,357 @@ sub BODY_00_15_x86 {
 
 if ($sse2) {
        &picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
-       &bt     (&DWP(0,"edx"),26);
-       &jnc    (&label("loop_x86"));
+       &mov    ("ecx",&DWP(0,"edx"));
+       &test   ("ecx",1<<26);
+       &jz     (&label("loop_x86"));
+
+       &mov    ("edx",&DWP(4,"edx"));
 
        # load ctx->h[0-7]
        &movq   ($A,&QWP(0,"esi"));
+        &and   ("ecx",1<<24);          # XMM registers availability
        &movq   ("mm1",&QWP(8,"esi"));
-       &movq   ("mm2",&QWP(16,"esi"));
+        &and   ("edx",1<<9);           # SSSE3 bit
+       &movq   ($BxC,&QWP(16,"esi"));
+        &or    ("ecx","edx");
        &movq   ("mm3",&QWP(24,"esi"));
        &movq   ($E,&QWP(32,"esi"));
        &movq   ("mm5",&QWP(40,"esi"));
        &movq   ("mm6",&QWP(48,"esi"));
        &movq   ("mm7",&QWP(56,"esi"));
+       &cmp    ("ecx",1<<24|1<<9);
+       &je     (&label("SSSE3"));
        &sub    ("esp",8*10);
+       &jmp    (&label("loop_sse2"));
 
 &set_label("loop_sse2",16);
-       # &movq ($Asse2,$A);
+       #&movq  ($Asse2,$A);
        &movq   ($Bsse2,"mm1");
-       &movq   ($Csse2,"mm2");
+       &movq   ($Csse2,$BxC);
        &movq   ($Dsse2,"mm3");
-       # &movq ($Esse2,$E);
+       #&movq  ($Esse2,$E);
        &movq   ($Fsse2,"mm5");
        &movq   ($Gsse2,"mm6");
+       &pxor   ($BxC,"mm1");                   # magic
        &movq   ($Hsse2,"mm7");
+       &movq   ("mm3",$A);                     # magic
 
-       &mov    ("ecx",&DWP(0,"edi"));
-       &mov    ("edx",&DWP(4,"edi"));
+       &mov    ("eax",&DWP(0,"edi"));
+       &mov    ("ebx",&DWP(4,"edi"));
        &add    ("edi",8);
-       &bswap  ("ecx");
-       &bswap  ("edx");
-       &mov    (&DWP(8*9+4,"esp"),"ecx");
-       &mov    (&DWP(8*9+0,"esp"),"edx");
+       &mov    ("edx",15);                     # counter
+       &bswap  ("eax");
+       &bswap  ("ebx");
+       &jmp    (&label("00_14_sse2"));
 
 &set_label("00_14_sse2",16);
+       &movd   ("mm1","eax");
        &mov    ("eax",&DWP(0,"edi"));
+       &movd   ("mm7","ebx");
        &mov    ("ebx",&DWP(4,"edi"));
        &add    ("edi",8);
        &bswap  ("eax");
        &bswap  ("ebx");
-       &mov    (&DWP(8*8+4,"esp"),"eax");
-       &mov    (&DWP(8*8+0,"esp"),"ebx");
+       &punpckldq("mm7","mm1");
 
        &BODY_00_15_sse2();
 
-       &cmp    (&LB("edx"),0x35);
-       &jne    (&label("00_14_sse2"));
+       &dec    ("edx");
+       &jnz    (&label("00_14_sse2"));
+
+       &movd   ("mm1","eax");
+       &movd   ("mm7","ebx");
+       &punpckldq("mm7","mm1");
 
        &BODY_00_15_sse2(1);
 
+       &pxor   ($A,$A);                        # A is in %mm3
+       &mov    ("edx",32);                     # counter
+       &jmp    (&label("16_79_sse2"));
+
 &set_label("16_79_sse2",16);
-       #&movq  ("mm2",&QWP(8*(9+16-1),"esp")); #prefetched in BODY_00_15 
-       #&movq  ("mm6",&QWP(8*(9+16-14),"esp"));
-       &movq   ("mm1","mm2");
+    for ($j=0;$j<2;$j++) {                     # 2x unroll
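+       # schedule: X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2]),
+       # with sigma0(x) = ROTR1^ROTR8^SHR7 and sigma1(x) = ROTR19^ROTR61^SHR6,
+       # again built from shift pairs; 32 trips through this 2x-unrolled loop
+       # cover rounds 16..79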
+       #&movq  ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15 
+       &movq   ("mm5",&QWP(8*(9+16-14),"esp"));
+       &movq   ("mm1","mm7");
+       &psrlq  ("mm7",1);
+        &movq  ("mm6","mm5");
+        &psrlq ("mm5",6);
+       &psllq  ("mm1",56);
+        &paddq ($A,"mm3");                     # from BODY_00_15
+        &movq  ("mm3","mm7");
+       &psrlq  ("mm7",7-1);
+        &pxor  ("mm3","mm1");
+        &psllq ("mm1",63-56);
+       &pxor   ("mm3","mm7");
+        &psrlq ("mm7",8-7);
+       &pxor   ("mm3","mm1");
+        &movq  ("mm1","mm5");
+        &psrlq ("mm5",19-6);
+       &pxor   ("mm7","mm3");                  # sigma0
+
+        &psllq ("mm6",3);
+        &pxor  ("mm1","mm5");
+       &paddq  ("mm7",&QWP(8*(9+16),"esp"));
+        &pxor  ("mm1","mm6");
+        &psrlq ("mm5",61-19);
+       &paddq  ("mm7",&QWP(8*(9+16-9),"esp"));
+        &pxor  ("mm1","mm5");
+        &psllq ("mm6",45-3);
+       &movq   ("mm5",$Fsse2);                 # load f
+        &pxor  ("mm1","mm6");                  # sigma1
+       &movq   ("mm6",$Gsse2);                 # load g
 
-       &psrlq  ("mm2",1);
-       &movq   ("mm7","mm6");
-       &psrlq  ("mm6",6);
-       &movq   ("mm3","mm2");
+       &paddq  ("mm7","mm1");                  # X[i]
+       #&movq  (&QWP(8*9,"esp"),"mm7");        # moved to BODY_00_15
 
-       &psrlq  ("mm2",7-1);
-       &movq   ("mm5","mm6");
-       &psrlq  ("mm6",19-6);
-       &pxor   ("mm3","mm2");
+       &BODY_00_15_sse2(2);
+    }
+       &dec    ("edx");
+       &jnz    (&label("16_79_sse2"));
 
-       &psrlq  ("mm2",8-7);
-       &pxor   ("mm5","mm6");
-       &psrlq  ("mm6",61-19);
-       &pxor   ("mm3","mm2");
+       #&movq  ($A,$Asse2);
+       &paddq  ($A,"mm3");                     # from BODY_00_15
+       &movq   ("mm1",$Bsse2);
+       #&movq  ($BxC,$Csse2);
+       &movq   ("mm3",$Dsse2);
+       #&movq  ($E,$Esse2);
+       &movq   ("mm5",$Fsse2);
+       &movq   ("mm6",$Gsse2);
+       &movq   ("mm7",$Hsse2);
 
-       &movq   ("mm2",&QWP(8*(9+16),"esp"));
+       &pxor   ($BxC,"mm1");                   # de-magic
+       &paddq  ($A,&QWP(0,"esi"));
+       &paddq  ("mm1",&QWP(8,"esi"));
+       &paddq  ($BxC,&QWP(16,"esi"));
+       &paddq  ("mm3",&QWP(24,"esi"));
+       &paddq  ($E,&QWP(32,"esi"));
+       &paddq  ("mm5",&QWP(40,"esi"));
+       &paddq  ("mm6",&QWP(48,"esi"));
+       &paddq  ("mm7",&QWP(56,"esi"));
 
-       &psllq  ("mm1",56);
-       &pxor   ("mm5","mm6");
-       &psllq  ("mm7",3);
-       &pxor   ("mm3","mm1");
+       &mov    ("eax",8*80);
+       &movq   (&QWP(0,"esi"),$A);
+       &movq   (&QWP(8,"esi"),"mm1");
+       &movq   (&QWP(16,"esi"),$BxC);
+       &movq   (&QWP(24,"esi"),"mm3");
+       &movq   (&QWP(32,"esi"),$E);
+       &movq   (&QWP(40,"esi"),"mm5");
+       &movq   (&QWP(48,"esi"),"mm6");
+       &movq   (&QWP(56,"esi"),"mm7");
 
-       &paddq  ("mm2",&QWP(8*(9+16-9),"esp"));
+       &lea    ("esp",&DWP(0,"esp","eax"));    # destroy frame
+       &sub    ($K512,"eax");                  # rewind K
 
-       &psllq  ("mm1",63-56);
-       &pxor   ("mm5","mm7");
-       &psllq  ("mm7",45-3);
-       &pxor   ("mm3","mm1");
-       &pxor   ("mm5","mm7");
+       &cmp    ("edi",&DWP(8*10+8,"esp"));     # are we done yet?
+       &jb     (&label("loop_sse2"));
 
-       &paddq  ("mm3","mm5");
-       &paddq  ("mm3","mm2");
-       &movq   (&QWP(8*9,"esp"),"mm3");
+       &mov    ("esp",&DWP(8*10+12,"esp"));    # restore sp
+       &emms   ();
+&function_end_A();
 
-       &BODY_00_15_sse2(1);
+&set_label("SSSE3",32);
+{ my ($cnt,$frame)=("ecx","edx");
+  my @X=map("xmm$_",(0..7));
+  my $j;
+  my $i=0;
+
+       &lea    ($frame,&DWP(-64,"esp"));
+       &sub    ("esp",256);
+
+       # fixed stack frame layout
+       #
+       # +0    A B C D E F G H         # backing store
+       # +64   X[0]+K[i] .. X[15]+K[i] # XMM->MM xfer area
+       # +192                          # XMM off-load ring buffer
+       # +256                          # saved parameters
+
+       &movdqa         (@X[1],&QWP(80*8,$K512));               # byte swap mask
+       &movdqu         (@X[0],&QWP(0,"edi"));
+       &pshufb         (@X[0],@X[1]);
+    for ($j=0;$j<8;$j++) {
+       &movdqa         (&QWP(16*(($j-1)%4),$frame),@X[3])      if ($j>4); # off-load
+       &movdqa         (@X[3],&QWP(16*($j%8),$K512));
+       &movdqa         (@X[2],@X[1])                           if ($j<7); # perpetuate byte swap mask
+       &movdqu         (@X[1],&QWP(16*($j+1),"edi"))           if ($j<7); # next input
+       &movdqa         (@X[1],&QWP(16*(($j+1)%4),$frame))      if ($j==7);# restore @X[0]
+       &paddq          (@X[3],@X[0]);
+       &pshufb         (@X[1],@X[2])                           if ($j<7);
+       &movdqa         (&QWP(16*($j%8)-128,$frame),@X[3]);     # xfer X[i]+K[i]
+
+       push(@X,shift(@X));                                     # rotate(@X)
+    }
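+       # the loop above byte-swaps the first block and parks X[0..15]+K[0..15]
+       # in the XMM->MM xfer area for the MMX round code below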
+       #&jmp           (&label("loop_ssse3"));
+       &nop            ();
 
-       &cmp    (&LB("edx"),0x17);
-       &jne    (&label("16_79_sse2"));
+&set_label("loop_ssse3",32);
+       &movdqa         (@X[2],&QWP(16*(($j+1)%4),$frame));     # pre-restore @X[1]
+       &movdqa         (&QWP(16*(($j-1)%4),$frame),@X[3]);     # off-load @X[3]
+       &lea            ($K512,&DWP(16*8,$K512));
+
+       #&movq  ($Asse2,$A);                    # off-load A-H
+       &movq   ($Bsse2,"mm1");
+        &mov   ("ebx","edi");
+       &movq   ($Csse2,$BxC);
+        &lea   ("edi",&DWP(128,"edi"));        # advance input
+       &movq   ($Dsse2,"mm3");
+        &cmp   ("edi","eax");
+       #&movq  ($Esse2,$E);
+       &movq   ($Fsse2,"mm5");
+        &cmovb ("ebx","edi");
+       &movq   ($Gsse2,"mm6");
+        &mov   ("ecx",4);                      # loop counter
+       &pxor   ($BxC,"mm1");                   # magic
+       &movq   ($Hsse2,"mm7");
+       &pxor   ("mm3","mm3");                  # magic
+
+       &jmp            (&label("00_47_ssse3"));
+
+sub BODY_00_15_ssse3 {         # "phase-less" copy of BODY_00_15_sse2
+       (
+       '&movq  ("mm1",$E)',                            # %mm1 is sliding right
+       '&movq  ("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i]
+        '&pxor ("mm5","mm6")',                         # f^=g
+       '&psrlq ("mm1",14)',
+        '&movq (&QWP(8*($i+4)%64,"esp"),$E)',          # modulo-scheduled save e
+        '&pand ("mm5",$E)',                            # f&=e
+       '&psllq ($E,23)',                               # $E is sliding left
+       '&paddq ($A,"mm3")',                            # [h+=Maj(a,b,c)]
+       '&movq  ("mm3","mm1")',                         # %mm3 is T1
+        '&psrlq("mm1",4)',
+        '&pxor ("mm5","mm6")',                         # Ch(e,f,g)
+       '&pxor  ("mm3",$E)',
+        '&psllq($E,23)',
+       '&pxor  ("mm3","mm1")',
+        '&movq (&QWP(8*$i%64,"esp"),$A)',              # modulo-scheduled save a
+        '&paddq("mm7","mm5")',                         # X[i]+=Ch(e,f,g)
+       '&pxor  ("mm3",$E)',
+        '&psrlq("mm1",23)',
+        '&paddq("mm7",&QWP(8*($i+7)%64,"esp"))',       # X[i]+=h
+       '&pxor  ("mm3","mm1")',
+        '&psllq($E,4)',
+       '&pxor  ("mm3",$E)',                            # T1=Sigma1_512(e)
+
+        '&movq ($E,&QWP(8*($i+3)%64,"esp"))',          # e = load d, e in next round
+       '&paddq ("mm3","mm7")',                         # T1+=X[i]
+        '&movq ("mm5",$A)',                            # %mm5 is sliding right
+        '&psrlq("mm5",28)',
+       '&paddq ($E,"mm3")',                            # d += T1
+        '&movq ("mm6",$A)',                            # %mm6 is sliding left
+        '&movq ("mm7","mm5")',
+        '&psllq("mm6",25)',
+       '&movq  ("mm1",&QWP(8*($i+1)%64,"esp"))',       # load b
+        '&psrlq("mm5",6)',
+        '&pxor ("mm7","mm6")',
+        '&psllq("mm6",5)',
+        '&pxor ("mm7","mm5")',
+       '&pxor  ($A,"mm1")',                            # a^b, b^c in next round
+        '&psrlq("mm5",5)',
+        '&pxor ("mm7","mm6")',
+       '&pand  ($BxC,$A)',                             # (b^c)&(a^b)
+        '&psllq("mm6",6)',
+        '&pxor ("mm7","mm5")',
+       '&pxor  ($BxC,"mm1")',                          # [h=]Maj(a,b,c)
+        '&pxor ("mm6","mm7")',                         # Sigma0_512(a)
+        '&movq ("mm5",&QWP(8*($i+5-1)%64,"esp"))',     # pre-load f
+       '&paddq ($BxC,"mm6")',                          # h+=Sigma0(a)
+        '&movq ("mm6",&QWP(8*($i+6-1)%64,"esp"))',     # pre-load g
+
+       '($A,$BxC) = ($BxC,$A); $i--;'
+       );
+}
 
-       # &movq ($A,$Asse2);
+&set_label("00_47_ssse3",32);
+
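+    # each XMM register carries two consecutive X[] qwords, so every pass
+    # below advances X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2])
+    # by two elements while the interleaved @insns stream retires two rounds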
+    for(;$j<16;$j++) {
+       my ($t0,$t2,$t1)=@X[2..4];
+       my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+       &movdqa         ($t2,@X[5]);
+       &movdqa         (@X[1],$t0);                    # restore @X[1]
+       &palignr        ($t0,@X[0],8);                  # X[1..2]
+       &movdqa         (&QWP(16*($j%4),$frame),@X[4]); # off-load @X[4]
+        &palignr       ($t2,@X[4],8);                  # X[9..10]
+
+       &movdqa         ($t1,$t0);
+       &psrlq          ($t0,7);
+        &paddq         (@X[0],$t2);                    # X[0..1] += X[9..10]
+       &movdqa         ($t2,$t1);
+       &psrlq          ($t1,1);
+       &psllq          ($t2,64-8);
+       &pxor           ($t0,$t1);
+       &psrlq          ($t1,8-1);
+       &pxor           ($t0,$t2);
+       &psllq          ($t2,8-1);
+       &pxor           ($t0,$t1);
+        &movdqa        ($t1,@X[7]);
+       &pxor           ($t0,$t2);                      # sigma0(X[1..2])
+        &movdqa        ($t2,@X[7]);
+        &psrlq         ($t1,6);
+       &paddq          (@X[0],$t0);                    # X[0..1] += sigma0(X[1..2])
+
+       &movdqa         ($t0,@X[7]);
+       &psrlq          ($t2,19);
+       &psllq          ($t0,64-61);
+       &pxor           ($t1,$t2);
+       &psrlq          ($t2,61-19);
+       &pxor           ($t1,$t0);
+       &psllq          ($t0,61-19);
+       &pxor           ($t1,$t2);
+       &movdqa         ($t2,&QWP(16*(($j+2)%4),$frame));# pre-restore @X[1]
+       &pxor           ($t1,$t0);                      # sigma1(X[14..15])
+       &movdqa         ($t0,&QWP(16*($j%8),$K512));
+        eval(shift(@insns));
+       &paddq          (@X[0],$t1);                    # X[0..1] += sigma1(X[14..15])
+        eval(shift(@insns));
+        eval(shift(@insns));
+        eval(shift(@insns));
+        eval(shift(@insns));
+       &paddq          ($t0,@X[0]);
+        foreach(@insns) { eval; }
+       &movdqa         (&QWP(16*($j%8)-128,$frame),$t0);# xfer X[i]+K[i]
+
+       push(@X,shift(@X));                             # rotate(@X)
+    }
+       &lea            ($K512,&DWP(16*8,$K512));
+       &dec            ("ecx");
+       &jnz            (&label("00_47_ssse3"));
+
+       &movdqa         (@X[1],&QWP(0,$K512));          # byte swap mask
+       &lea            ($K512,&DWP(-80*8,$K512));      # rewind
+       &movdqu         (@X[0],&QWP(0,"ebx"));
+       &pshufb         (@X[0],@X[1]);
+
+    for ($j=0;$j<8;$j++) {     # load next or same block
+       my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+       &movdqa         (&QWP(16*(($j-1)%4),$frame),@X[3])      if ($j>4); # off-load
+       &movdqa         (@X[3],&QWP(16*($j%8),$K512));
+       &movdqa         (@X[2],@X[1])                           if ($j<7); # perpetuate byte swap mask
+       &movdqu         (@X[1],&QWP(16*($j+1),"ebx"))           if ($j<7); # next input
+       &movdqa         (@X[1],&QWP(16*(($j+1)%4),$frame))      if ($j==7);# restore @X[0]
+       &paddq          (@X[3],@X[0]);
+       &pshufb         (@X[1],@X[2])                           if ($j<7);
+        foreach(@insns) { eval; }
+       &movdqa         (&QWP(16*($j%8)-128,$frame),@X[3]);# xfer X[i]+K[i]
+
+       push(@X,shift(@X));                             # rotate(@X)
+    }
+
+       #&movq  ($A,$Asse2);                    # load A-H
        &movq   ("mm1",$Bsse2);
-       &movq   ("mm2",$Csse2);
+       &paddq  ($A,"mm3");                     # from BODY_00_15
+       #&movq  ($BxC,$Csse2);
        &movq   ("mm3",$Dsse2);
-       # &movq ($E,$Esse2);
-       &movq   ("mm5",$Fsse2);
-       &movq   ("mm6",$Gsse2);
+       #&movq  ($E,$Esse2);
+       #&movq  ("mm5",$Fsse2);
+       #&movq  ("mm6",$Gsse2);
        &movq   ("mm7",$Hsse2);
 
+       &pxor   ($BxC,"mm1");                   # de-magic
        &paddq  ($A,&QWP(0,"esi"));
        &paddq  ("mm1",&QWP(8,"esi"));
-       &paddq  ("mm2",&QWP(16,"esi"));
+       &paddq  ($BxC,&QWP(16,"esi"));
        &paddq  ("mm3",&QWP(24,"esi"));
        &paddq  ($E,&QWP(32,"esi"));
        &paddq  ("mm5",&QWP(40,"esi"));
@@ -396,21 +661,19 @@ if ($sse2) {
 
        &movq   (&QWP(0,"esi"),$A);
        &movq   (&QWP(8,"esi"),"mm1");
-       &movq   (&QWP(16,"esi"),"mm2");
+       &movq   (&QWP(16,"esi"),$BxC);
        &movq   (&QWP(24,"esi"),"mm3");
        &movq   (&QWP(32,"esi"),$E);
        &movq   (&QWP(40,"esi"),"mm5");
        &movq   (&QWP(48,"esi"),"mm6");
        &movq   (&QWP(56,"esi"),"mm7");
 
-       &add    ("esp",8*80);                   # destroy frame
-       &sub    ($K512,8*80);                   # rewind K
-
-       &cmp    ("edi",&DWP(8*10+8,"esp"));     # are we done yet?
-       &jb     (&label("loop_sse2"));
+       &cmp    ("edi","eax");                  # are we done yet?
+       &jb     (&label("loop_ssse3"));
 
+       &mov    ("esp",&DWP(64+12,$frame));     # restore sp
        &emms   ();
-       &mov    ("esp",&DWP(8*10+12,"esp"));    # restore sp
+}
 &function_end_A();
 }
 &set_label("loop_x86",16);
@@ -452,9 +715,9 @@ if ($sse2) {
        &mov    ("edx",&DWP(8*(9+15+16-1)+4,"esp"));
        &mov    ("esi","ecx");
 
-       &shr    ("ecx",1)       # lo>>1
+       &shr    ("ecx",1);      # lo>>1
        &mov    ("edi","edx");
-       &shr    ("edx",1)       # hi>>1
+       &shr    ("edx",1);      # hi>>1
        &mov    ("eax","ecx");
        &shl    ("esi",24);     # lo<<24
        &mov    ("ebx","edx");
@@ -488,9 +751,9 @@ if ($sse2) {
        &mov    ("edx",&DWP(8*(9+15+16-14)+4,"esp"));
        &mov    ("esi","ecx");
 
-       &shr    ("ecx",6)       # lo>>6
+       &shr    ("ecx",6);      # lo>>6
        &mov    ("edi","edx");
-       &shr    ("edx",6)       # hi>>6
+       &shr    ("edx",6);      # hi>>6
        &mov    ("eax","ecx");
        &shl    ("esi",3);      # lo<<3
        &mov    ("ebx","edx");
@@ -638,6 +901,9 @@ if ($sse2) {
        &data_word(0xfc657e2a,0x597f299c);      # u64
        &data_word(0x3ad6faec,0x5fcb6fab);      # u64
        &data_word(0x4a475817,0x6c44198c);      # u64
+
+       &data_word(0x04050607,0x00010203);      # byte swap
+       &data_word(0x0c0d0e0f,0x08090a0b);      # mask
 &function_end_B("sha512_block_data_order");
 &asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");