author    Andy Polyakov <appro@openssl.org>
          Mon, 12 Sep 2011 08:25:14 +0000 (08:25 +0000)
committer Andy Polyakov <appro@openssl.org>
          Mon, 12 Sep 2011 08:25:14 +0000 (08:25 +0000)

Add so-called Vector Permutation AES x86[_64] assembler; see
http://crypto.stanford.edu/vpaes/ for background information.
It's not integrated into the build system yet.

crypto/aes/asm/vpaes-x86.pl [new file with mode: 0644]
crypto/aes/asm/vpaes-x86_64.pl [new file with mode: 0644]
crypto/perlasm/x86_64-xlate.pl

diff --git a/crypto/aes/asm/vpaes-x86.pl b/crypto/aes/asm/vpaes-x86.pl
new file mode 100644 (file)
index 0000000..1de722b
--- /dev/null
@@ -0,0 +1,900 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Port vpaes-x86_64.pl as 32-bit "almost" drop-in replacement for
+# aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original, nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. The aes-586.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with 128-bit key, and the vpaes-x86.pl column -
+# encrypt/decrypt.
+#
+#              aes-586.pl              vpaes-x86.pl
+#
+# Core 2(**)   29.1/42.3/18.3          22.0/25.6(***)
+# Nehalem      27.9/40.4/18.1          10.3/12.0
+# Atom         102./119./60.1          64.5/85.3(***)
+#
+# (*)  "Hyper-threading" in this context refers to cache shared among
+#      multiple cores rather than to Intel HTT specifically. As the
+#      vast majority of contemporary cores share cache, the slower
+#      code path is commonplace. In other words,
+#      "with-hyper-threading-off" results are presented mostly for
+#      reference purposes.
+#
+# (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***) The less impressive improvement on Core 2 and Atom is due to
+#      their slow pshufb; it's nevertheless a respectable +32%/+65%
+#      improvement on Core 2 and +58%/+40% on Atom.
+#
+#                                              <appro@openssl.org>
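+#
+# The core trick, in a rough pseudocode sketch (illustrative only,
+# not generated code): each byte x is split into two 4-bit nibbles,
+# each nibble indexes a 16-entry table held entirely in an xmm
+# register, and pshufb performs all 16 lookups at once:
+#
+#      lo = x & 0x0F; hi = x >> 4;
+#      y  = tbl_lo[lo] ^ tbl_hi[hi];
+#
+# Since the tables never leave registers, no memory access depends on
+# secret data, which is what makes this constant-time. See the paper
+# above for the field arithmetic that reduces the S-box to such
+# nibble lookups.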
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"vpaes-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");
+
+$PREFIX="AES";
+
+my  ($round, $base, $magic, $key, $const, $inp, $out)=
+    ("eax",  "ebx", "ecx",  "edx","ebp",  "esi","edi");
+
+&static_label("_vpaes_consts");
+&static_label("_vpaes_schedule_low_round");
+
+&set_label("_vpaes_consts",64);
+$k_inv=-0x30;          # inv, inva
+       &data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
+       &data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);
+
+$k_s0F=-0x10;          # s0F
+       &data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);
+
+$k_ipt=0x00;           # input transform (lo, hi)
+       &data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
+       &data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);
+
+$k_sb1=0x20;           # sb1u, sb1t
+       &data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
+       &data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
+$k_sb2=0x40;           # sb2u, sb2t
+       &data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
+       &data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
+$k_sbo=0x60;           # sbou, sbot
+       &data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
+       &data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);
+
+$k_mc_forward=0x80;    # mc_forward
+       &data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
+       &data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
+       &data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
+       &data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);
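+       # (these rows, like k_mc_backward's below, are 4-byte rotations
+       # of one another; the encrypt loop picks a row per round via
+       # $magic, which cycles through 0x00-0x30 in steps of 16)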
+
+$k_mc_backward=0xc0;   # mc_backward
+       &data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
+       &data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
+       &data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
+       &data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);
+
+$k_sr=0x100;           # sr
+       &data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
+       &data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
+       &data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
+       &data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);
+
+$k_rcon=0x140;         # rcon
+       &data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);
+
+$k_s63=0x150;          # s63: all equal to 0x63 transformed
+       &data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);
+
+$k_opt=0x160;          # output transform
+       &data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
+       &data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);
+
+$k_deskew=0x180;       # deskew tables: inverts the sbox's "skew"
+       &data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
+       &data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+$k_dksd=0x1a0;         # decryption key schedule: invskew x*D
+       &data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
+       &data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
+$k_dksb=0x1c0;         # decryption key schedule: invskew x*B
+       &data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
+       &data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
+$k_dkse=0x1e0;         # decryption key schedule: invskew x*E + 0x63
+       &data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
+       &data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
+$k_dks9=0x200;         # decryption key schedule: invskew x*9
+       &data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
+       &data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+$k_dipt=0x220;         # decryption input transform
+       &data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
+       &data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);
+
+$k_dsb9=0x240;         # decryption sbox output *9*u, *9*t
+       &data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
+       &data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
+$k_dsbd=0x260;         # decryption sbox output *D*u, *D*t
+       &data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
+       &data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
+$k_dsbb=0x280;         # decryption sbox output *B*u, *B*t
+       &data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
+       &data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
+$k_dsbe=0x2a0;         # decryption sbox output *E*u, *E*t
+       &data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
+       &data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
+$k_dsbo=0x2c0;         # decryption sbox final output
+       &data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
+       &data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
+&asciz ("Vector Permutation AES for x86, Mike Hamburg (Stanford University)");
+&align (64);
+
+&function_begin_B("_vpaes_preheat");
+       &add    ($const,&DWP(0,"esp"));
+       &movdqa ("xmm7",&QWP($k_inv,$const));
+       &movdqa ("xmm6",&QWP($k_s0F,$const));
+       &ret    ();
+&function_end_B("_vpaes_preheat");
+
+##
+##  _vpaes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm6-%xmm7 as in _vpaes_preheat
+##    (%edx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
+##
+##
+&function_begin_B("_vpaes_encrypt_core");
+       &mov    ($magic,16);
+       &mov    ($round,&DWP(240,$key));
+       &movdqa ("xmm1","xmm6");
+       &movdqa ("xmm2",&QWP($k_ipt,$const));
+       &pandn  ("xmm1","xmm0");
+       &movdqu ("xmm5",&QWP(0,$key));
+       &psrld  ("xmm1",4);
+       &pand   ("xmm0","xmm6");
+       &pshufb ("xmm2","xmm0");
+       &movdqa ("xmm0",&QWP($k_ipt+16,$const));
+       &pshufb ("xmm0","xmm1");
+       &pxor   ("xmm2","xmm5");
+       &pxor   ("xmm0","xmm2");
+       &add    ($key,16);
+       &lea    ($base,&DWP($k_mc_backward,$const));
+       &jmp    (&label("enc_entry"));
+
+
+&set_label("enc_loop",16);
+       # middle of middle round
+       &movdqa ("xmm4",&QWP($k_sb1,$const));   # 4 : sb1u
+       &pshufb ("xmm4","xmm2");                # 4 = sb1u
+       &pxor   ("xmm4","xmm5");                # 4 = sb1u + k
+       &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
+       &pshufb ("xmm0","xmm3");                # 0 = sb1t
+       &pxor   ("xmm0","xmm4");                # 0 = A
+       &movdqa ("xmm5",&QWP($k_sb2,$const));   # 4 : sb2u
+       &pshufb ("xmm5","xmm2");                # 4 = sb2u
+       &movdqa ("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
+       &movdqa ("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
+       &pshufb ("xmm2","xmm3");                # 2 = sb2t
+       &pxor   ("xmm2","xmm5");                # 2 = 2A
+       &movdqa ("xmm4",&QWP(0,$base,$magic));  # .Lk_mc_backward[]
+       &movdqa ("xmm3","xmm0");                # 3 = A
+       &pshufb ("xmm0","xmm1");                # 0 = B
+       &add    ($key,16);                      # next key
+       &pxor   ("xmm0","xmm2");                # 0 = 2A+B
+       &pshufb ("xmm3","xmm4");                # 3 = D
+       &add    ($magic,16);                    # next mc
+       &pxor   ("xmm3","xmm0");                # 3 = 2A+B+D
+       &pshufb ("xmm0","xmm1");                # 0 = 2B+C
+       &and    ($magic,0x30);                  # ... mod 4
+       &pxor   ("xmm0","xmm3");                # 0 = 2A+3B+C+D
+       &sub    ($round,1);                     # nr--
+
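+       # The entry code below performs the S-box's inversion step via
+       # the tower-field decomposition from the paper: each byte is
+       # split into high/low nibbles (i, j), pushed through the
+       # 16-byte inv/inva tables (1/i, 1/j, 1/iak, 1/jak), and
+       # recombined into the nibbles io/jo, which the sb* tables in
+       # the round body then map through the S-box's affine output.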
+&set_label("enc_entry");
+       # top of round
+       &movdqa ("xmm1","xmm6");                # 1 : i
+       &pandn  ("xmm1","xmm0");                # 1 = i<<4
+       &psrld  ("xmm1",4);                     # 1 = i
+       &pand   ("xmm0","xmm6");                # 0 = k
+       &movdqa ("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
+       &pshufb ("xmm5","xmm0");                # 2 = a/k
+       &pxor   ("xmm0","xmm1");                # 0 = j
+       &movdqa ("xmm3","xmm7");                # 3 : 1/i
+       &pshufb ("xmm3","xmm1");                # 3 = 1/i
+       &pxor   ("xmm3","xmm5");                # 3 = iak = 1/i + a/k
+       &movdqa ("xmm4","xmm7");                # 4 : 1/j
+       &pshufb ("xmm4","xmm0");                # 4 = 1/j
+       &pxor   ("xmm4","xmm5");                # 4 = jak = 1/j + a/k
+       &movdqa ("xmm2","xmm7");                # 2 : 1/iak
+       &pshufb ("xmm2","xmm3");                # 2 = 1/iak
+       &pxor   ("xmm2","xmm0");                # 2 = io
+       &movdqa ("xmm3","xmm7");                # 3 : 1/jak
+       &movdqu ("xmm5",&QWP(0,$key));
+       &pshufb ("xmm3","xmm4");                # 3 = 1/jak
+       &pxor   ("xmm3","xmm1");                # 3 = jo
+       &jnz    (&label("enc_loop"));
+
+       # middle of last round
+       &movdqa ("xmm4",&QWP($k_sbo,$const));   # 3 : sbou      .Lk_sbo
+       &movdqa ("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot      .Lk_sbo+16
+       &pshufb ("xmm4","xmm2");                # 4 = sbou
+       &pxor   ("xmm4","xmm5");                # 4 = sb1u + k
+       &pshufb ("xmm0","xmm3");                # 0 = sb1t
+       &movdqa ("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
+       &pxor   ("xmm0","xmm4");                # 0 = A
+       &pshufb ("xmm0","xmm1");
+       &ret    ();
+&function_end_B("_vpaes_encrypt_core");
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+&function_begin_B("_vpaes_decrypt_core");
+       &mov    ($round,&DWP(240,$key));
+       &lea    ($base,&DWP($k_dsbd,$const));
+       &movdqa ("xmm1","xmm6");
+       &movdqa ("xmm2",&QWP($k_dipt-$k_dsbd,$base));
+       &pandn  ("xmm1","xmm0");
+       &mov    ($magic,$round);
+       &psrld  ("xmm1",4);
+       &movdqu ("xmm5",&QWP(0,$key));
+       &shl    ($magic,4);
+       &pand   ("xmm0","xmm6");
+       &pshufb ("xmm2","xmm0");
+       &movdqa ("xmm0",&DWP($k_dipt-$k_dsbd+16,$base));
+       &xor    ($magic,0x30);
+       &pshufb ("xmm0","xmm1");
+       &and    ($magic,0x30);
+       &pxor   ("xmm2","xmm5");
+       &movdqa ("xmm5",&QWP($k_mc_forward+48,$const));
+       &pxor   ("xmm0","xmm2");
+       &add    ($key,16);
+       &lea    ($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
+       &jmp    (&label("dec_entry"));
+
+&set_label("dec_loop",16);
+##
+##  Inverse mix columns
+##
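+##  The sb9/sbd/sbb/sbe table pairs are accumulated Horner-style:
+##  each intervening "MC ch" pshufb rotates the running sum, so the
+##  chain evaluates the inverse-MixColumns circulant (E,B,D,9) fused
+##  with the inverse S-box output.
+##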
+       &movdqa ("xmm4",&QWP(-0x20,$base));     # 4 : sb9u
+       &pshufb ("xmm4","xmm2");                # 4 = sb9u
+       &pxor   ("xmm4","xmm0");
+       &movdqa ("xmm0",&QWP(-0x10,$base));     # 0 : sb9t
+       &pshufb ("xmm0","xmm3");                # 0 = sb9t
+       &pxor   ("xmm0","xmm4");                # 0 = ch
+       &add    ($key,16);                      # next round key
+
+       &pshufb ("xmm0","xmm5");                # MC ch
+       &movdqa ("xmm4",&QWP(0,$base));         # 4 : sbdu
+       &pshufb ("xmm4","xmm2");                # 4 = sbdu
+       &pxor   ("xmm4","xmm0");                # 4 = ch
+       &movdqa ("xmm0",&QWP(0x10,$base));      # 0 : sbdt
+       &pshufb ("xmm0","xmm3");                # 0 = sbdt
+       &pxor   ("xmm0","xmm4");                # 0 = ch
+       &sub    ($round,1);                     # nr--
+
+       &pshufb ("xmm0","xmm5");                # MC ch
+       &movdqa ("xmm4",&QWP(0x20,$base));      # 4 : sbbu
+       &pshufb ("xmm4","xmm2");                # 4 = sbbu
+       &pxor   ("xmm4","xmm0");                # 4 = ch
+       &movdqa ("xmm0",&QWP(0x30,$base));      # 0 : sbbt
+       &pshufb ("xmm0","xmm3");                # 0 = sbbt
+       &pxor   ("xmm0","xmm4");                # 0 = ch
+
+       &pshufb ("xmm0","xmm5");                # MC ch
+       &movdqa ("xmm4",&QWP(0x40,$base));      # 4 : sbeu
+       &pshufb ("xmm4","xmm2");                # 4 = sbeu
+       &pxor   ("xmm4","xmm0");                # 4 = ch
+       &movdqa ("xmm0",&QWP(0x50,$base));      # 0 : sbet
+       &pshufb ("xmm0","xmm3");                # 0 = sbet
+       &pxor   ("xmm0","xmm4");                # 0 = ch
+
+       &palignr("xmm5","xmm5",12);             # rotate MC mask for next round
+
+&set_label("dec_entry");
+       # top of round
+       &movdqa ("xmm1","xmm6");                # 1 : i
+       &pandn  ("xmm1","xmm0");                # 1 = i<<4
+       &psrld  ("xmm1",4);                     # 1 = i
+       &pand   ("xmm0","xmm6");                # 0 = k
+       &movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+       &pshufb ("xmm2","xmm0");                # 2 = a/k
+       &pxor   ("xmm0","xmm1");                # 0 = j
+       &movdqa ("xmm3","xmm7");                # 3 : 1/i
+       &pshufb ("xmm3","xmm1");                # 3 = 1/i
+       &pxor   ("xmm3","xmm2");                # 3 = iak = 1/i + a/k
+       &movdqa ("xmm4","xmm7");                # 4 : 1/j
+       &pshufb ("xmm4","xmm0");                # 4 = 1/j
+       &pxor   ("xmm4","xmm2");                # 4 = jak = 1/j + a/k
+       &movdqa ("xmm2","xmm7");                # 2 : 1/iak
+       &pshufb ("xmm2","xmm3");                # 2 = 1/iak
+       &pxor   ("xmm2","xmm0");                # 2 = io
+       &movdqa ("xmm3","xmm7");                # 3 : 1/jak
+       &pshufb ("xmm3","xmm4");                # 3 = 1/jak
+       &pxor   ("xmm3","xmm1");                # 3 = jo
+       &movdqu ("xmm0",&QWP(0,$key));
+       &jnz    (&label("dec_loop"));
+
+       # middle of last round
+       &movdqa ("xmm4",&QWP(0x60,$base));      # 3 : sbou
+       &pshufb ("xmm4","xmm2");                # 4 = sbou
+       &pxor   ("xmm4","xmm0");                # 4 = sb1u + k
+       &movdqa ("xmm0",&QWP(0x70,$base));      # 0 : sbot
+       &movdqa ("xmm2",&QWP(0,$magic));
+       &pshufb ("xmm0","xmm3");                # 0 = sb1t
+       &pxor   ("xmm0","xmm4");                # 0 = A
+       &pshufb ("xmm0","xmm2");
+       &ret    ();
+&function_end_B("_vpaes_decrypt_core");
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+&function_begin_B("_vpaes_schedule_core");
+       &add    ($const,&DWP(0,"esp"));
+       &movdqu ("xmm0",&QWP(0,$inp));          # load key (unaligned)
+       &movdqa ("xmm2",&QWP($k_rcon,$const));  # load rcon
+
+       # input transform
+       &movdqa ("xmm3","xmm0");
+       &lea    ($base,&DWP($k_ipt,$const));
+       &movdqa (&QWP(4,"esp"),"xmm2");         # xmm8
+       &call   ("_vpaes_schedule_transform");
+       &movdqa ("xmm7","xmm0");
+
+       &test   ($out,$out);
+       &jnz    (&label("schedule_am_decrypting"));
+
+       # encrypting, output zeroth round key after transform
+       &movdqu (&QWP(0,$key),"xmm0");
+       &jmp    (&label("schedule_go"));
+
+&set_label("schedule_am_decrypting");
+       # decrypting, output zeroth round key after shiftrows
+       &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
+       &pshufb ("xmm3","xmm1");
+       &movdqu (&QWP(0,$key),"xmm3");
+       &xor    ($magic,0x30);
+
+&set_label("schedule_go");
+       &cmp    ($round,192);
+       &ja     (&label("schedule_256"));
+       &je     (&label("schedule_192"));
+       # 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+&set_label("schedule_128");
+       &mov    ($round,10);
+
+&set_label("loop_schedule_128");
+       &call   ("_vpaes_schedule_round");
+       &dec    ($round);
+       &jz     (&label("schedule_mangle_last"));
+       &call   ("_vpaes_schedule_mangle");     # write output
+       &jmp    (&label("loop_schedule_128"));
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
+&set_label("schedule_192",16);
+       &movdqu ("xmm0",&QWP(8,$inp));          # load key part 2 (very unaligned)
+       &call   ("_vpaes_schedule_transform");  # input transform       
+       &movdqa ("xmm6","xmm0");                # save short part
+       &pxor   ("xmm4","xmm4");                # clear 4
+       &movhlps("xmm6","xmm4");                # clobber low side with zeros
+       &mov    ($round,4);
+
+&set_label("loop_schedule_192");
+       &call   ("_vpaes_schedule_round");
+       &palignr("xmm0","xmm6",8);
+       &call   ("_vpaes_schedule_mangle");     # save key n
+       &call   ("_vpaes_schedule_192_smear");
+       &call   ("_vpaes_schedule_mangle");     # save key n+1
+       &call   ("_vpaes_schedule_round");
+       &dec    ($round);
+       &jz     (&label("schedule_mangle_last"));
+       &call   ("_vpaes_schedule_mangle");     # save key n+2
+       &call   ("_vpaes_schedule_192_smear");
+       &jmp    (&label("loop_schedule_192"));
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+&set_label("schedule_256",16);
+       &movdqu ("xmm0",&QWP(16,$inp));         # load key part 2 (unaligned)
+       &call   ("_vpaes_schedule_transform");  # input transform       
+       &mov    ($round,7);
+
+&set_label("loop_schedule_256");
+       &call   ("_vpaes_schedule_mangle");     # output low result
+       &movdqa ("xmm6","xmm0");                # save cur_lo in xmm6
+
+       # high round
+       &call   ("_vpaes_schedule_round");
+       &dec    ($round);
+       &jz     (&label("schedule_mangle_last"));
+       &call   ("_vpaes_schedule_mangle");     
+
+       # low round. swap xmm7 and xmm6
+       &pshufd ("xmm0","xmm0",0xFF);
+       &movdqa (&QWP(20,"esp"),"xmm7");
+       &movdqa ("xmm7","xmm6");
+       &call   ("_vpaes_schedule_low_round");
+       &movdqa ("xmm7",&QWP(20,"esp"));
+
+       &jmp    (&label("loop_schedule_256"));
+
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+&set_label("schedule_mangle_last",16);
+       # schedule last round key from xmm0
+       &lea    ($base,&DWP($k_deskew,$const));
+       &test   ($out,$out);
+       &jnz    (&label("schedule_mangle_last_dec"));
+
+       # encrypting
+       &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
+       &pshufb ("xmm0","xmm1");                # output permute
+       &lea    ($base,&DWP($k_opt,$const));    # prepare to output transform
+       &add    ($key,32);
+
+&set_label("schedule_mangle_last_dec");
+       &add    ($key,-16);
+       &pxor   ("xmm0",&QWP($k_s63,$const));
+       &call   ("_vpaes_schedule_transform");  # output transform
+       &movdqu (&QWP(0,$key),"xmm0");          # save last key
+
+       # cleanup
+       &pxor   ("xmm0","xmm0");
+       &pxor   ("xmm1","xmm1");
+       &pxor   ("xmm2","xmm2");
+       &pxor   ("xmm3","xmm3");
+       &pxor   ("xmm4","xmm4");
+       &pxor   ("xmm5","xmm5");
+       &pxor   ("xmm6","xmm6");
+       &pxor   ("xmm7","xmm7");
+       &ret    ();
+&function_end_B("_vpaes_schedule_core");
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+&function_begin_B("_vpaes_schedule_192_smear");
+       &pshufd ("xmm0","xmm6",0x80);           # d c 0 0 -> c 0 0 0
+       &pxor   ("xmm6","xmm0");                # -> c+d c 0 0
+       &pshufd ("xmm0","xmm7",0xFE);           # b a _ _ -> b b b a
+       &pxor   ("xmm6","xmm0");                # -> b+c+d b+c b a
+       &movdqa ("xmm0","xmm6");
+       &pxor   ("xmm1","xmm1");
+       &movhlps("xmm6","xmm1");                # clobber low side with zeros
+       &ret    ();
+&function_end_B("_vpaes_schedule_192_smear");
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm5.
+##
+&function_begin_B("_vpaes_schedule_round");
+       # extract rcon from xmm8
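+       # (stack offsets here are 4 bytes higher than in
+       # _vpaes_schedule_core: the call that brought us here pushed
+       # a return address, so the core's 4(%esp) is our 8(%esp))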
+       &movdqa ("xmm2",&QWP(8,"esp"));         # xmm8
+       &pxor   ("xmm1","xmm1");
+       &palignr("xmm1","xmm2",15);
+       &palignr("xmm2","xmm2",15);
+       &pxor   ("xmm7","xmm1");
+
+       # rotate
+       &pshufd ("xmm0","xmm0",0xFF);
+       &palignr("xmm0","xmm0",1);
+
+       # fall through...
+       &movdqa (&QWP(8,"esp"),"xmm2");         # xmm8
+
+       # low round: same as high round, but no rotation and no rcon.
+&set_label("_vpaes_schedule_low_round");
+       # smear xmm7
+       &movdqa ("xmm1","xmm7");
+       &pslldq ("xmm7",4);
+       &pxor   ("xmm7","xmm1");
+       &movdqa ("xmm1","xmm7");
+       &pslldq ("xmm7",8);
+       &pxor   ("xmm7","xmm1");
+       &pxor   ("xmm7",&QWP($k_s63,$const));
+
+       # subbyte
+       &movdqa ("xmm4",&QWP($k_s0F,$const));
+       &movdqa ("xmm5",&QWP($k_inv,$const));   # 4 : 1/j
+       &movdqa ("xmm1","xmm4");        
+       &pandn  ("xmm1","xmm0");
+       &psrld  ("xmm1",4);                     # 1 = i
+       &pand   ("xmm0","xmm4");                # 0 = k
+       &movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+       &pshufb ("xmm2","xmm0");                # 2 = a/k
+       &pxor   ("xmm0","xmm1");                # 0 = j
+       &movdqa ("xmm3","xmm5");                # 3 : 1/i
+       &pshufb ("xmm3","xmm1");                # 3 = 1/i
+       &pxor   ("xmm3","xmm2");                # 3 = iak = 1/i + a/k
+       &movdqa ("xmm4","xmm5");                # 4 : 1/j
+       &pshufb ("xmm4","xmm0");                # 4 = 1/j
+       &pxor   ("xmm4","xmm2");                # 4 = jak = 1/j + a/k
+       &movdqa ("xmm2","xmm5");                # 2 : 1/iak
+       &pshufb ("xmm2","xmm3");                # 2 = 1/iak
+       &pxor   ("xmm2","xmm0");                # 2 = io
+       &movdqa ("xmm3","xmm5");                # 3 : 1/jak
+       &pshufb ("xmm3","xmm4");                # 3 = 1/jak
+       &pxor   ("xmm3","xmm1");                # 3 = jo
+       &movdqa ("xmm4",&QWP($k_sb1,$const));   # 4 : sbou
+       &pshufb ("xmm4","xmm2");                # 4 = sbou
+       &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
+       &pshufb ("xmm0","xmm3");                # 0 = sb1t
+       &pxor   ("xmm0","xmm4");                # 0 = sbox output
+
+       # add in smeared stuff
+       &pxor   ("xmm0","xmm7");
+       &movdqa ("xmm7","xmm0");
+       &ret    ();
+&function_end_B("_vpaes_schedule_round");
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%ebx)
+##
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
+&function_begin_B("_vpaes_schedule_transform");
+       &movdqa ("xmm2",&QWP($k_s0F,$const));
+       &movdqa ("xmm1","xmm2");
+       &pandn  ("xmm1","xmm0");
+       &psrld  ("xmm1",4);
+       &pand   ("xmm0","xmm2");
+       &movdqa ("xmm2",&QWP(0,$base));
+       &pshufb ("xmm2","xmm0");
+       &movdqa ("xmm0",&QWP(16,$base));
+       &pshufb ("xmm0","xmm1");
+       &pxor   ("xmm0","xmm2");
+       &ret    ();
+&function_end_B("_vpaes_schedule_transform");
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%edx), and increments or decrements it
+##  Keeps track of round number mod 4 in %ecx
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
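+##  In the encrypt path, one pshufb by k_mc_forward rotates bytes
+##  within each column; calling that rotation r(), the three
+##  shuffle/xor steps below accumulate r(x) ^ r^2(x) ^ r^3(x),
+##  i.e. the circulant (0,1,1,1) multiply mentioned above.
+##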
+&function_begin_B("_vpaes_schedule_mangle");
+       &movdqa ("xmm4","xmm0");        # save xmm0 for later
+       &movdqa ("xmm5",&QWP($k_mc_forward,$const));
+       &test   ($out,$out);
+       &jnz    (&label("schedule_mangle_dec"));
+
+       # encrypting
+       &add    ($key,16);
+       &pxor   ("xmm4",&QWP($k_s63,$const));
+       &pshufb ("xmm4","xmm5");
+       &movdqa ("xmm3","xmm4");
+       &pshufb ("xmm4","xmm5");
+       &pxor   ("xmm3","xmm4");
+       &pshufb ("xmm4","xmm5");
+       &pxor   ("xmm3","xmm4");
+
+       &jmp    (&label("schedule_mangle_both"));
+
+&set_label("schedule_mangle_dec",16);
+       # inverse mix columns
+       &movdqa ("xmm2",&QWP($k_s0F,$const));
+       &lea    ($inp,&DWP($k_dksd,$const));
+       &movdqa ("xmm1","xmm2");
+       &pandn  ("xmm1","xmm4");
+       &psrld  ("xmm1",4);                     # 1 = hi
+       &pand   ("xmm4","xmm2");                # 4 = lo
+
+       &movdqa ("xmm2",&QWP(0,$inp));
+       &pshufb ("xmm2","xmm4");
+       &movdqa ("xmm3",&QWP(0x10,$inp));
+       &pshufb ("xmm3","xmm1");
+       &pxor   ("xmm3","xmm2");
+       &pshufb ("xmm3","xmm5");
+
+       &movdqa ("xmm2",&QWP(0x20,$inp));
+       &pshufb ("xmm2","xmm4");
+       &pxor   ("xmm2","xmm3");
+       &movdqa ("xmm3",&QWP(0x30,$inp));
+       &pshufb ("xmm3","xmm1");
+       &pxor   ("xmm3","xmm2");
+       &pshufb ("xmm3","xmm5");
+
+       &movdqa ("xmm2",&QWP(0x40,$inp));
+       &pshufb ("xmm2","xmm4");
+       &pxor   ("xmm2","xmm3");
+       &movdqa ("xmm3",&QWP(0x50,$inp));
+       &pshufb ("xmm3","xmm1");
+       &pxor   ("xmm3","xmm2");
+       &pshufb ("xmm3","xmm5");
+
+       &movdqa ("xmm2",&QWP(0x60,$inp));
+       &pshufb ("xmm2","xmm4");
+       &pxor   ("xmm2","xmm3");
+       &movdqa ("xmm3",&QWP(0x70,$inp));
+       &pshufb ("xmm3","xmm1");
+       &pxor   ("xmm3","xmm2");
+
+       &add    ($key,-16);
+
+&set_label("schedule_mangle_both");
+       &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
+       &pshufb ("xmm3","xmm1");
+       &add    ($magic,-16);
+       &and    ($magic,0x30);
+       &movdqu (&QWP(0,$key),"xmm3");
+       &ret    ();
+&function_end_B("_vpaes_schedule_mangle");
+
+#
+# Interface to OpenSSL
+#
+&function_begin("${PREFIX}_set_encrypt_key");
+       &mov    ($inp,&wparam(0));              # inp
+       &lea    ($base,&DWP(-56,"esp"));
+       &mov    ($round,&wparam(1));            # bits
+       &and    ($base,-16);
+       &mov    ($key,&wparam(2));              # key
+       &xchg   ($base,"esp");                  # alloca
+       &mov    (&DWP(48,"esp"),$base);
+
+       &mov    ($base,$round);
+       &shr    ($base,5);
+       &add    ($base,5);
+       &mov    (&DWP(240,$key),$base);         # AES_KEY->rounds = nbits/32+5;
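+       # (nbits>>5)+5 gives 9/11/13 for 128/192/256-bit keys; the
+       # cores run that many looped rounds plus one final round,
+       # i.e. AES's usual 10/12/14 in total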
+       &mov    ($magic,0x30);
+       &mov    ($out,0);
+
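+       # position-independent addressing: the lea below loads the
+       # link-time offset of the constants from "pic_point", and the
+       # call pushes pic_point's run-time address as its return
+       # address; the callee's "add ($const,&DWP(0,"esp"))" then
+       # yields the absolute address of _vpaes_consts+0x30. The same
+       # pattern recurs in every entry point below.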
+       &lea    ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+       &call   ("_vpaes_schedule_core");
+&set_label("pic_point");
+
+       &mov    ("esp",&DWP(48,"esp"));
+       &xor    ("eax","eax");
+&function_end("${PREFIX}_set_encrypt_key");
+
+&function_begin("${PREFIX}_set_decrypt_key");
+       &mov    ($inp,&wparam(0));              # inp
+       &lea    ($base,&DWP(-56,"esp"));
+       &mov    ($round,&wparam(1));            # bits
+       &and    ($base,-16);
+       &mov    ($key,&wparam(2));              # key
+       &xchg   ($base,"esp");                  # alloca
+       &mov    (&DWP(48,"esp"),$base);
+
+       &mov    ($base,$round);
+       &shr    ($base,5);
+       &add    ($base,5);
+       &mov    (&DWP(240,$key),$base); # AES_KEY->rounds = nbits/32+5;
+       &shl    ($base,4);
+       &lea    ($key,&DWP(16,$key,$base));
+
+       &mov    ($out,1);
+       &mov    ($magic,$round);
+       &shr    ($magic,1);
+       &and    ($magic,32);
+       &xor    ($magic,32);                    # nbits==192?0:32
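+       # (worked through: 128>>1=64 &32 -> 0 ^32 -> 32;
+       #  192>>1=96 &32 -> 32 ^32 -> 0; 256>>1=128 &32 -> 0 ^32 -> 32)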
+
+       &lea    ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+       &call   ("_vpaes_schedule_core");
+&set_label("pic_point");
+
+       &mov    ("esp",&DWP(48,"esp"));
+       &xor    ("eax","eax");
+&function_end("${PREFIX}_set_decrypt_key");
+
+&function_begin("${PREFIX}_encrypt");
+       &lea    ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+       &call   ("_vpaes_preheat");
+&set_label("pic_point");
+       &mov    ($inp,&wparam(0));              # inp
+       &lea    ($base,&DWP(-56,"esp"));
+       &mov    ($out,&wparam(1));              # out
+       &and    ($base,-16);
+       &mov    ($key,&wparam(2));              # key
+       &xchg   ($base,"esp");                  # alloca
+       &mov    (&DWP(48,"esp"),$base);
+
+       &movdqu ("xmm0",&QWP(0,$inp));
+       &call   ("_vpaes_encrypt_core");
+       &movdqu (&QWP(0,$out),"xmm0");
+
+       &mov    ("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_encrypt");
+
+&function_begin("${PREFIX}_decrypt");
+       &lea    ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+       &call   ("_vpaes_preheat");
+&set_label("pic_point");
+       &mov    ($inp,&wparam(0));              # inp
+       &lea    ($base,&DWP(-56,"esp"));
+       &mov    ($out,&wparam(1));              # out
+       &and    ($base,-16);
+       &mov    ($key,&wparam(2));              # key
+       &xchg   ($base,"esp");                  # alloca
+       &mov    (&DWP(48,"esp"),$base);
+
+       &movdqu ("xmm0",&QWP(0,$inp));
+       &call   ("_vpaes_decrypt_core");
+       &movdqu (&QWP(0,$out),"xmm0");
+
+       &mov    ("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_decrypt");
+
+&function_begin("${PREFIX}_cbc_encrypt");
+       &mov    ($inp,&wparam(0));              # inp
+       &mov    ($out,&wparam(1));              # out
+       &mov    ($round,&wparam(2));            # len
+       &mov    ($key,&wparam(3));              # key
+       &lea    ($base,&DWP(-56,"esp"));
+       &mov    ($const,&wparam(4));            # ivp
+       &and    ($base,-16);
+       &mov    ($magic,&wparam(5));            # enc
+       &xchg   ($base,"esp");                  # alloca
+       &movdqu ("xmm1",&QWP(0,$const));        # load IV
+       &sub    ($out,$inp);                    # out -= inp: output is addressed as delta($inp) below
+       &mov    (&DWP(48,"esp"),$base);
+
+       &mov    (&DWP(0,"esp"),$out);           # save out
+       &sub    ($round,16);
+       &mov    (&DWP(4,"esp"),$key)            # save key
+       &mov    (&DWP(8,"esp"),$const);         # save ivp
+       &mov    ($out,$round);                  # $out works as $len
+
+       &lea    ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+       &call   ("_vpaes_preheat");
+&set_label("pic_point");
+       &cmp    ($magic,0);
+       &je     (&label("cbc_dec_loop"));
+       &jmp    (&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+       &movdqu ("xmm0",&QWP(0,$inp));          # load input
+       &pxor   ("xmm0","xmm1");                # inp^=iv
+       &call   ("_vpaes_encrypt_core");
+       &mov    ($base,&DWP(0,"esp"));          # restore out
+       &mov    ($key,&DWP(4,"esp"));           # restore key
+       &movdqa ("xmm1","xmm0");
+       &movdqu (&QWP(0,$base,$inp),"xmm0");    # write output
+       &lea    ($inp,&DWP(16,$inp));
+       &sub    ($out,16);
+       &jnc    (&label("cbc_enc_loop"));
+       &jmp    (&label("cbc_done"));
+
+&set_label("cbc_dec_loop",16);
+       &movdqu ("xmm0",&QWP(0,$inp));          # load input
+       &movdqa (&QWP(16,"esp"),"xmm1");        # save IV
+       &movdqa (&QWP(32,"esp"),"xmm0");        # save future IV
+       &call   ("_vpaes_decrypt_core");
+       &mov    ($base,&DWP(0,"esp"));          # restore out
+       &mov    ($key,&DWP(4,"esp"));           # restore key
+       &pxor   ("xmm0",&QWP(16,"esp"));        # out^=iv
+       &movdqa ("xmm1",&QWP(32,"esp"));        # load next IV
+       &movdqu (&QWP(0,$base,$inp),"xmm0");    # write output
+       &lea    ($inp,&DWP(16,$inp));
+       &sub    ($out,16);
+       &jnc    (&label("cbc_dec_loop"));
+
+&set_label("cbc_done");
+       &mov    ($base,&DWP(8,"esp"));          # restore ivp
+       &mov    ("esp",&DWP(48,"esp"));
+       &movdqu (&QWP(0,$base),"xmm1");         # write IV
+&function_end("${PREFIX}_cbc_encrypt");
+
+&asm_finish();
diff --git a/crypto/aes/asm/vpaes-x86_64.pl b/crypto/aes/asm/vpaes-x86_64.pl
new file mode 100644 (file)
index 0000000..17651e1
--- /dev/null
@@ -0,0 +1,1203 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Interface to OpenSSL as "almost" drop-in replacement for
+# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original, nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. The aes-x86_64.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with 128-bit key, and the vpaes-x86_64.pl column -
+# encrypt/decrypt.
+#
+#              aes-x86_64.pl           vpaes-x86_64.pl
+#
+# Core 2(**)   30.5/43.7/14.3          21.8/25.7(***)
+# Nehalem      30.5/42.2/14.6           9.8/11.8
+# Atom         63.9/79.0/32.1          64.0/84.8(***)
+#
+# (*)  "Hyper-threading" in this context refers to cache shared among
+#      multiple cores rather than to Intel HTT specifically. As the
+#      vast majority of contemporary cores share cache, the slower
+#      code path is commonplace. In other words,
+#      "with-hyper-threading-off" results are presented mostly for
+#      reference purposes.
+#
+# (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***) The less impressive improvement on Core 2 and Atom is due to
+#      their slow pshufb; it's nevertheless a respectable +40%/+70%
+#      improvement on Core 2.
+#
+#                                              <appro@openssl.org>
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$PREFIX="AES";
+
+$code.=<<___;
+.text
+
+##
+##  _vpaes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm9-%xmm15 as in _vpaes_preheat
+##    (%rdx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
+##  Preserves %xmm6-%xmm8 so you get some local vectors
+##
+##
+.type  _vpaes_encrypt_core,\@abi-omnipotent
+.align 16
+_vpaes_encrypt_core:
+       mov     %rdx,   %r9
+       mov     \$16,   %r11
+       mov     240(%rdx),%eax
+       movdqa  %xmm9,  %xmm1
+       movdqa  .Lk_ipt(%rip), %xmm2    # iptlo
+       pandn   %xmm0,  %xmm1
+       movdqu  (%r9),  %xmm5           # round0 key
+       psrld   \$4,    %xmm1
+       pand    %xmm9,  %xmm0
+       pshufb  %xmm0,  %xmm2
+       movdqa  .Lk_ipt+16(%rip), %xmm0 # ipthi
+       pshufb  %xmm1,  %xmm0
+       pxor    %xmm5,  %xmm2
+       pxor    %xmm2,  %xmm0
+       add     \$16,   %r9
+       lea     .Lk_mc_backward(%rip),%r10
+       jmp     .Lenc_entry
+
+.align 16
+.Lenc_loop:
+       # middle of middle round
+       movdqa  %xmm13, %xmm4   # 4 : sb1u
+       pshufb  %xmm2,  %xmm4   # 4 = sb1u
+       pxor    %xmm5,  %xmm4   # 4 = sb1u + k
+       movdqa  %xmm12, %xmm0   # 0 : sb1t
+       pshufb  %xmm3,  %xmm0   # 0 = sb1t
+       pxor    %xmm4,  %xmm0   # 0 = A
+       movdqa  %xmm15, %xmm5   # 4 : sb2u
+       pshufb  %xmm2,  %xmm5   # 4 = sb2u
+       movdqa  -0x40(%r11,%r10), %xmm1         # .Lk_mc_forward[]
+       movdqa  %xmm14, %xmm2   # 2 : sb2t
+       pshufb  %xmm3,  %xmm2   # 2 = sb2t
+       pxor    %xmm5,  %xmm2   # 2 = 2A
+       movdqa  (%r11,%r10), %xmm4              # .Lk_mc_backward[]
+       movdqa  %xmm0,  %xmm3   # 3 = A
+       pshufb  %xmm1,  %xmm0   # 0 = B
+       add     \$16,   %r9     # next key
+       pxor    %xmm2,  %xmm0   # 0 = 2A+B
+       pshufb  %xmm4,  %xmm3   # 3 = D
+       add     \$16,   %r11    # next mc
+       pxor    %xmm0,  %xmm3   # 3 = 2A+B+D
+       pshufb  %xmm1,  %xmm0   # 0 = 2B+C
+       and     \$0x30, %r11    # ... mod 4
+       pxor    %xmm3,  %xmm0   # 0 = 2A+3B+C+D
+       sub     \$1,%rax        # nr--
+
+.Lenc_entry:
+       # top of round
+       movdqa  %xmm9,  %xmm1   # 1 : i
+       pandn   %xmm0,  %xmm1   # 1 = i<<4
+       psrld   \$4,    %xmm1   # 1 = i
+       pand    %xmm9,  %xmm0   # 0 = k
+       movdqa  %xmm11, %xmm5   # 2 : a/k
+       pshufb  %xmm0,  %xmm5   # 2 = a/k
+       pxor    %xmm1,  %xmm0   # 0 = j
+       movdqa  %xmm10, %xmm3   # 3 : 1/i
+       pshufb  %xmm1,  %xmm3   # 3 = 1/i
+       pxor    %xmm5,  %xmm3   # 3 = iak = 1/i + a/k
+       movdqa  %xmm10, %xmm4   # 4 : 1/j
+       pshufb  %xmm0,  %xmm4   # 4 = 1/j
+       pxor    %xmm5,  %xmm4   # 4 = jak = 1/j + a/k
+       movdqa  %xmm10, %xmm2   # 2 : 1/iak
+       pshufb  %xmm3,  %xmm2   # 2 = 1/iak
+       pxor    %xmm0,  %xmm2   # 2 = io
+       movdqa  %xmm10, %xmm3   # 3 : 1/jak
+       movdqu  (%r9),  %xmm5
+       pshufb  %xmm4,  %xmm3   # 3 = 1/jak
+       pxor    %xmm1,  %xmm3   # 3 = jo
+       jnz     .Lenc_loop
+
+       # middle of last round
+       movdqa  -0x60(%r10), %xmm4      # 3 : sbou      .Lk_sbo
+       movdqa  -0x50(%r10), %xmm0      # 0 : sbot      .Lk_sbo+16
+       pshufb  %xmm2,  %xmm4   # 4 = sbou
+       pxor    %xmm5,  %xmm4   # 4 = sb1u + k
+       pshufb  %xmm3,  %xmm0   # 0 = sb1t
+       movdqa  0x40(%r11,%r10), %xmm1          # .Lk_sr[]
+       pxor    %xmm4,  %xmm0   # 0 = A
+       pshufb  %xmm1,  %xmm0
+       ret
+.size  _vpaes_encrypt_core,.-_vpaes_encrypt_core
+       
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+.type  _vpaes_decrypt_core,\@abi-omnipotent
+.align 16
+_vpaes_decrypt_core:
+       mov     %rdx,   %r9             # load key
+       mov     240(%rdx),%eax
+       movdqa  %xmm9,  %xmm1
+       movdqa  .Lk_dipt(%rip), %xmm2   # iptlo
+       pandn   %xmm0,  %xmm1
+       mov     %rax,   %r11
+       psrld   \$4,    %xmm1
+       movdqu  (%r9),  %xmm5           # round0 key
+       shl     \$4,    %r11
+       pand    %xmm9,  %xmm0
+       pshufb  %xmm0,  %xmm2
+       movdqa  .Lk_dipt+16(%rip), %xmm0 # ipthi
+       xor     \$0x30, %r11
+       lea     .Lk_dsbd(%rip),%r10
+       pshufb  %xmm1,  %xmm0
+       and     \$0x30, %r11
+       pxor    %xmm5,  %xmm2
+       movdqa  .Lk_mc_forward+48(%rip), %xmm5
+       pxor    %xmm2,  %xmm0
+       add     \$16,   %r9
+       add     %r10,   %r11
+       jmp     .Ldec_entry
+
+.align 16
+.Ldec_loop:
+##
+##  Inverse mix columns
+##
+       movdqa  -0x20(%r10),%xmm4       # 4 : sb9u
+       pshufb  %xmm2,  %xmm4           # 4 = sb9u
+       pxor    %xmm0,  %xmm4
+       movdqa  -0x10(%r10),%xmm0       # 0 : sb9t
+       pshufb  %xmm3,  %xmm0           # 0 = sb9t
+       pxor    %xmm4,  %xmm0           # 0 = ch
+       add     \$16, %r9               # next round key
+
+       pshufb  %xmm5,  %xmm0           # MC ch
+       movdqa  0x00(%r10),%xmm4        # 4 : sbdu
+       pshufb  %xmm2,  %xmm4           # 4 = sbdu
+       pxor    %xmm0,  %xmm4           # 4 = ch
+       movdqa  0x10(%r10),%xmm0        # 0 : sbdt
+       pshufb  %xmm3,  %xmm0           # 0 = sbdt
+       pxor    %xmm4,  %xmm0           # 0 = ch
+       sub     \$1,%rax                # nr--
+       
+       pshufb  %xmm5,  %xmm0           # MC ch
+       movdqa  0x20(%r10),%xmm4        # 4 : sbbu
+       pshufb  %xmm2,  %xmm4           # 4 = sbbu
+       pxor    %xmm0,  %xmm4           # 4 = ch
+       movdqa  0x30(%r10),%xmm0        # 0 : sbbt
+       pshufb  %xmm3,  %xmm0           # 0 = sbbt
+       pxor    %xmm4,  %xmm0           # 0 = ch
+       
+       pshufb  %xmm5,  %xmm0           # MC ch
+       movdqa  0x40(%r10),%xmm4        # 4 : sbeu
+       pshufb  %xmm2,  %xmm4           # 4 = sbeu
+       pxor    %xmm0,  %xmm4           # 4 = ch
+       movdqa  0x50(%r10),%xmm0        # 0 : sbet
+       pshufb  %xmm3,  %xmm0           # 0 = sbet
+       pxor    %xmm4,  %xmm0           # 0 = ch
+
+       palignr \$12,   %xmm5,  %xmm5   # rotate MC mask for next round
+       
+.Ldec_entry:
+       # top of round
+       movdqa  %xmm9,  %xmm1   # 1 : i
+       pandn   %xmm0,  %xmm1   # 1 = i<<4
+       psrld   \$4,    %xmm1   # 1 = i
+       pand    %xmm9,  %xmm0   # 0 = k
+       movdqa  %xmm11, %xmm2   # 2 : a/k
+       pshufb  %xmm0,  %xmm2   # 2 = a/k
+       pxor    %xmm1,  %xmm0   # 0 = j
+       movdqa  %xmm10, %xmm3   # 3 : 1/i
+       pshufb  %xmm1,  %xmm3   # 3 = 1/i
+       pxor    %xmm2,  %xmm3   # 3 = iak = 1/i + a/k
+       movdqa  %xmm10, %xmm4   # 4 : 1/j
+       pshufb  %xmm0,  %xmm4   # 4 = 1/j
+       pxor    %xmm2,  %xmm4   # 4 = jak = 1/j + a/k
+       movdqa  %xmm10, %xmm2   # 2 : 1/iak
+       pshufb  %xmm3,  %xmm2   # 2 = 1/iak
+       pxor    %xmm0,  %xmm2   # 2 = io
+       movdqa  %xmm10, %xmm3   # 3 : 1/jak
+       pshufb  %xmm4,  %xmm3   # 3 = 1/jak
+       pxor    %xmm1,  %xmm3   # 3 = jo
+       movdqu  (%r9),  %xmm0
+       jnz     .Ldec_loop
+
+       # middle of last round
+       movdqa  0x60(%r10), %xmm4       # 3 : sbou
+       pshufb  %xmm2,  %xmm4   # 4 = sbou
+       pxor    %xmm0,  %xmm4   # 4 = sb1u + k
+       movdqa  0x70(%r10), %xmm0       # 0 : sbot
+       movdqa  .Lk_sr-.Lk_dsbd(%r11), %xmm2
+       pshufb  %xmm3,  %xmm0   # 0 = sb1t
+       pxor    %xmm4,  %xmm0   # 0 = A
+       pshufb  %xmm2,  %xmm0
+       ret
+.size  _vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+.type  _vpaes_schedule_core,\@abi-omnipotent
+.align 16
+_vpaes_schedule_core:
+       # rdi = key
+       # rsi = size in bits
+       # rdx = buffer
+       # rcx = direction.  0=encrypt, 1=decrypt
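+       # (SysV argument registers apply even on Win64: the public
+       # entry points are declared \@function,3, so the x86_64-xlate
+       # wrapper moves rcx/rdx/r8 into rdi/rsi/rdx before control
+       # gets here; this helper itself is \@abi-omnipotent)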
+
+       call    _vpaes_preheat          # load the tables
+       movdqa  .Lk_rcon(%rip), %xmm8   # load rcon
+       movdqu  (%rdi), %xmm0           # load key (unaligned)
+
+       # input transform
+       movdqa  %xmm0,  %xmm3
+       lea     .Lk_ipt(%rip), %r11
+       call    _vpaes_schedule_transform
+       movdqa  %xmm0,  %xmm7
+
+       lea     .Lk_sr(%rip),%r10
+       test    %rcx,   %rcx
+       jnz     .Lschedule_am_decrypting
+
+       # encrypting, output zeroth round key after transform
+       movdqu  %xmm0,  (%rdx)
+       jmp     .Lschedule_go
+
+.Lschedule_am_decrypting:
+       # decrypting, output zeroth round key after shiftrows
+       movdqa  (%r8,%r10),%xmm1
+       pshufb  %xmm1,  %xmm3
+       movdqu  %xmm3,  (%rdx)
+       xor     \$0x30, %r8
+
+.Lschedule_go:
+       cmp     \$192,  %esi
+       ja      .Lschedule_256
+       je      .Lschedule_192
+       # 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+.Lschedule_128:
+       mov     \$10, %esi
+       
+.Loop_schedule_128:
+       call    _vpaes_schedule_round
+       dec     %rsi
+       jz      .Lschedule_mangle_last
+       call    _vpaes_schedule_mangle  # write output
+       jmp     .Loop_schedule_128
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
+.align 16
+.Lschedule_192:
+       movdqu  8(%rdi),%xmm0           # load key part 2 (very unaligned)
+       call    _vpaes_schedule_transform       # input transform
+       movdqa  %xmm0,  %xmm6           # save short part
+       pxor    %xmm4,  %xmm4           # clear 4
+       movhlps %xmm4,  %xmm6           # clobber low side with zeros
+       mov     \$4,    %esi
+
+.Loop_schedule_192:
+       call    _vpaes_schedule_round
+       palignr \$8,%xmm6,%xmm0 
+       call    _vpaes_schedule_mangle  # save key n
+       call    _vpaes_schedule_192_smear
+       call    _vpaes_schedule_mangle  # save key n+1
+       call    _vpaes_schedule_round
+       dec     %rsi
+       jz      .Lschedule_mangle_last
+       call    _vpaes_schedule_mangle  # save key n+2
+       call    _vpaes_schedule_192_smear
+       jmp     .Loop_schedule_192
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+.align 16
+.Lschedule_256:
+       movdqu  16(%rdi),%xmm0          # load key part 2 (unaligned)
+       call    _vpaes_schedule_transform       # input transform
+       mov     \$7, %esi
+       
+.Loop_schedule_256:
+       call    _vpaes_schedule_mangle  # output low result
+       movdqa  %xmm0,  %xmm6           # save cur_lo in xmm6
+
+       # high round
+       call    _vpaes_schedule_round
+       dec     %rsi
+       jz      .Lschedule_mangle_last
+       call    _vpaes_schedule_mangle  
+
+       # low round. swap xmm7 and xmm6
+       pshufd  \$0xFF, %xmm0,  %xmm0
+       movdqa  %xmm7,  %xmm5
+       movdqa  %xmm6,  %xmm7
+       call    _vpaes_schedule_low_round
+       movdqa  %xmm5,  %xmm7
+       
+       jmp     .Loop_schedule_256
+
+       
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+.align 16
+.Lschedule_mangle_last:
+       # schedule last round key from xmm0
+       lea     .Lk_deskew(%rip),%r11   # prepare to deskew
+       test    %rcx,   %rcx
+       jnz     .Lschedule_mangle_last_dec
+
+       # encrypting
+       movdqa  (%r8,%r10),%xmm1
+       pshufb  %xmm1,  %xmm0           # output permute
+       lea     .Lk_opt(%rip),  %r11    # prepare to output transform
+       add     \$32,   %rdx
+
+.Lschedule_mangle_last_dec:
+       add     \$-16,  %rdx
+       pxor    .Lk_s63(%rip),  %xmm0
+       call    _vpaes_schedule_transform # output transform
+       movdqu  %xmm0,  (%rdx)          # save last key
+
+       # cleanup
+       pxor    %xmm0,  %xmm0
+       pxor    %xmm1,  %xmm1
+       pxor    %xmm2,  %xmm2
+       pxor    %xmm3,  %xmm3
+       pxor    %xmm4,  %xmm4
+       pxor    %xmm5,  %xmm5
+       pxor    %xmm6,  %xmm6
+       pxor    %xmm7,  %xmm7
+       ret
+.size  _vpaes_schedule_core,.-_vpaes_schedule_core
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+.type  _vpaes_schedule_192_smear,\@abi-omnipotent
+.align 16
+_vpaes_schedule_192_smear:
+       pshufd  \$0x80, %xmm6,  %xmm0   # d c 0 0 -> c 0 0 0
+       pxor    %xmm0,  %xmm6           # -> c+d c 0 0
+       pshufd  \$0xFE, %xmm7,  %xmm0   # b a _ _ -> b b b a
+       pxor    %xmm0,  %xmm6           # -> b+c+d b+c b a
+       movdqa  %xmm6,  %xmm0
+       pxor    %xmm1,  %xmm1
+       movhlps %xmm1,  %xmm6           # clobber low side with zeros
+       ret
+.size  _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm4, %r11.
+##
+.type  _vpaes_schedule_round,\@abi-omnipotent
+.align 16
+_vpaes_schedule_round:
+       # extract rcon from xmm8
+       pxor    %xmm1,  %xmm1
+       palignr \$15,   %xmm8,  %xmm1
+       palignr \$15,   %xmm8,  %xmm8
+       pxor    %xmm1,  %xmm7
+
+       # rotate
+       pshufd  \$0xFF, %xmm0,  %xmm0
+       palignr \$1,    %xmm0,  %xmm0
+       
+       # fall through...
+       
+       # low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+       # smear xmm7
+       movdqa  %xmm7,  %xmm1
+       pslldq  \$4,    %xmm7
+       pxor    %xmm1,  %xmm7
+       movdqa  %xmm7,  %xmm1
+       pslldq  \$8,    %xmm7
+       pxor    %xmm1,  %xmm7
+       pxor    .Lk_s63(%rip), %xmm7
+
+       # subbytes
+       movdqa  %xmm9,  %xmm1
+       pandn   %xmm0,  %xmm1
+       psrld   \$4,    %xmm1           # 1 = i
+       pand    %xmm9,  %xmm0           # 0 = k
+       movdqa  %xmm11, %xmm2           # 2 : a/k
+       pshufb  %xmm0,  %xmm2           # 2 = a/k
+       pxor    %xmm1,  %xmm0           # 0 = j
+       movdqa  %xmm10, %xmm3           # 3 : 1/i
+       pshufb  %xmm1,  %xmm3           # 3 = 1/i
+       pxor    %xmm2,  %xmm3           # 3 = iak = 1/i + a/k
+       movdqa  %xmm10, %xmm4           # 4 : 1/j
+       pshufb  %xmm0,  %xmm4           # 4 = 1/j
+       pxor    %xmm2,  %xmm4           # 4 = jak = 1/j + a/k
+       movdqa  %xmm10, %xmm2           # 2 : 1/iak
+       pshufb  %xmm3,  %xmm2           # 2 = 1/iak
+       pxor    %xmm0,  %xmm2           # 2 = io
+       movdqa  %xmm10, %xmm3           # 3 : 1/jak
+       pshufb  %xmm4,  %xmm3           # 3 = 1/jak
+       pxor    %xmm1,  %xmm3           # 3 = jo
+       movdqa  %xmm13, %xmm4           # 4 : sbou
+       pshufb  %xmm2,  %xmm4           # 4 = sbou
+       movdqa  %xmm12, %xmm0           # 0 : sbot
+       pshufb  %xmm3,  %xmm0           # 0 = sb1t
+       pxor    %xmm4,  %xmm0           # 0 = sbox output
+
+       # add in smeared stuff
+       pxor    %xmm7,  %xmm0   
+       movdqa  %xmm0,  %xmm7
+       ret
+.size  _vpaes_schedule_round,.-_vpaes_schedule_round
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%r11)
+##
+##  Requires that %xmm9 = 0x0F0F... as in preheat
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
+.type  _vpaes_schedule_transform,\@abi-omnipotent
+.align 16
+_vpaes_schedule_transform:
+       movdqa  %xmm9,  %xmm1
+       pandn   %xmm0,  %xmm1
+       psrld   \$4,    %xmm1
+       pand    %xmm9,  %xmm0
+       movdqa  (%r11), %xmm2   # lo
+       pshufb  %xmm0,  %xmm2
+       movdqa  16(%r11), %xmm0 # hi
+       pshufb  %xmm1,  %xmm0
+       pxor    %xmm2,  %xmm0
+       ret
+.size  _vpaes_schedule_transform,.-_vpaes_schedule_transform
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%rdx), and increments or decrements it
+##  Keeps track of round number mod 4 in %r8
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
+.type  _vpaes_schedule_mangle,\@abi-omnipotent
+.align 16
+_vpaes_schedule_mangle:
+       movdqa  %xmm0,  %xmm4   # save xmm0 for later
+       movdqa  .Lk_mc_forward(%rip),%xmm5
+       test    %rcx,   %rcx
+       jnz     .Lschedule_mangle_dec
+
+       # encrypting
+       add     \$16,   %rdx
+       pxor    .Lk_s63(%rip),%xmm4
+       pshufb  %xmm5,  %xmm4
+       movdqa  %xmm4,  %xmm3
+       pshufb  %xmm5,  %xmm4
+       pxor    %xmm4,  %xmm3
+       pshufb  %xmm5,  %xmm4
+       pxor    %xmm4,  %xmm3
+
+       jmp     .Lschedule_mangle_both
+.align 16
+.Lschedule_mangle_dec:
+       # inverse mix columns
+       lea     .Lk_dksd(%rip),%r11
+       movdqa  %xmm9,  %xmm1
+       pandn   %xmm4,  %xmm1
+       psrld   \$4,    %xmm1   # 1 = hi
+       pand    %xmm9,  %xmm4   # 4 = lo
+
+       movdqa  0x00(%r11), %xmm2
+       pshufb  %xmm4,  %xmm2
+       movdqa  0x10(%r11), %xmm3
+       pshufb  %xmm1,  %xmm3
+       pxor    %xmm2,  %xmm3
+       pshufb  %xmm5,  %xmm3
+
+       movdqa  0x20(%r11), %xmm2
+       pshufb  %xmm4,  %xmm2
+       pxor    %xmm3,  %xmm2
+       movdqa  0x30(%r11), %xmm3
+       pshufb  %xmm1,  %xmm3
+       pxor    %xmm2,  %xmm3
+       pshufb  %xmm5,  %xmm3
+
+       movdqa  0x40(%r11), %xmm2
+       pshufb  %xmm4,  %xmm2
+       pxor    %xmm3,  %xmm2
+       movdqa  0x50(%r11), %xmm3
+       pshufb  %xmm1,  %xmm3
+       pxor    %xmm2,  %xmm3
+       pshufb  %xmm5,  %xmm3
+
+       movdqa  0x60(%r11), %xmm2
+       pshufb  %xmm4,  %xmm2
+       pxor    %xmm3,  %xmm2
+       movdqa  0x70(%r11), %xmm3
+       pshufb  %xmm1,  %xmm3
+       pxor    %xmm2,  %xmm3
+
+       add     \$-16,  %rdx
+
+.Lschedule_mangle_both:
+       movdqa  (%r8,%r10),%xmm1
+       pshufb  %xmm1,%xmm3
+       add     \$-16,  %r8
+       and     \$0x30, %r8
+       movdqu  %xmm3,  (%rdx)
+       ret
+.size  _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
+
+#
+# Interface to OpenSSL
+#
+.globl ${PREFIX}_set_encrypt_key
+.type  ${PREFIX}_set_encrypt_key,\@function,3
+.align 16
+${PREFIX}_set_encrypt_key:
+___
+$code.=<<___ if ($win64);
+       lea     -0xb8(%rsp),%rsp
+       movaps  %xmm6,0x10(%rsp)
+       movaps  %xmm7,0x20(%rsp)
+       movaps  %xmm8,0x30(%rsp)
+       movaps  %xmm9,0x40(%rsp)
+       movaps  %xmm10,0x50(%rsp)
+       movaps  %xmm11,0x60(%rsp)
+       movaps  %xmm12,0x70(%rsp)
+       movaps  %xmm13,0x80(%rsp)
+       movaps  %xmm14,0x90(%rsp)
+       movaps  %xmm15,0xa0(%rsp)
+.Lenc_key_body:
+___
+$code.=<<___;
+       mov     %esi,%eax
+       shr     \$5,%eax
+       add     \$5,%eax
+       mov     %eax,240(%rdx)  # AES_KEY->rounds = nbits/32+5;
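+                                       # e.g. 9/11/13 for 128/192/256-bit
+                                       # keys; one less than FIPS-197's Nr,
+                                       # a convention private to this module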
+
+       mov     \$0,%ecx
+       mov     \$0x30,%r8d
+       call    _vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+       movaps  0x10(%rsp),%xmm6
+       movaps  0x20(%rsp),%xmm7
+       movaps  0x30(%rsp),%xmm8
+       movaps  0x40(%rsp),%xmm9
+       movaps  0x50(%rsp),%xmm10
+       movaps  0x60(%rsp),%xmm11
+       movaps  0x70(%rsp),%xmm12
+       movaps  0x80(%rsp),%xmm13
+       movaps  0x90(%rsp),%xmm14
+       movaps  0xa0(%rsp),%xmm15
+       lea     0xb8(%rsp),%rsp
+.Lenc_key_epilogue:
+___
+$code.=<<___;
+       xor     %eax,%eax
+       ret
+.size  ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+
+.globl ${PREFIX}_set_decrypt_key
+.type  ${PREFIX}_set_decrypt_key,\@function,3
+.align 16
+${PREFIX}_set_decrypt_key:
+___
+$code.=<<___ if ($win64);
+       lea     -0xb8(%rsp),%rsp
+       movaps  %xmm6,0x10(%rsp)
+       movaps  %xmm7,0x20(%rsp)
+       movaps  %xmm8,0x30(%rsp)
+       movaps  %xmm9,0x40(%rsp)
+       movaps  %xmm10,0x50(%rsp)
+       movaps  %xmm11,0x60(%rsp)
+       movaps  %xmm12,0x70(%rsp)
+       movaps  %xmm13,0x80(%rsp)
+       movaps  %xmm14,0x90(%rsp)
+       movaps  %xmm15,0xa0(%rsp)
+.Ldec_key_body:
+___
+$code.=<<___;
+       mov     %esi,%eax
+       shr     \$5,%eax
+       add     \$5,%eax
+       mov     %eax,240(%rdx)  # AES_KEY->rounds = nbits/32+5;
+       shl     \$4,%eax
+       lea     16(%rdx,%rax),%rdx
+
+       mov     \$1,%ecx
+       mov     %esi,%r8d
+       shr     \$1,%r8d
+       and     \$32,%r8d
+       xor     \$32,%r8d       # nbits==192?0:32
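+       # %rdx now points just past the last 16-byte round-key slot;
+       # the decryption schedule is written back-to-front from there.
+       # Worked %r8d examples: 192: (192>>1)&32=32, ^32 -> 0
+       #                       128: (128>>1)&32= 0, ^32 -> 32
+       #                       256: (256>>1)&32= 0, ^32 -> 32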
+       call    _vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+       movaps  0x10(%rsp),%xmm6
+       movaps  0x20(%rsp),%xmm7
+       movaps  0x30(%rsp),%xmm8
+       movaps  0x40(%rsp),%xmm9
+       movaps  0x50(%rsp),%xmm10
+       movaps  0x60(%rsp),%xmm11
+       movaps  0x70(%rsp),%xmm12
+       movaps  0x80(%rsp),%xmm13
+       movaps  0x90(%rsp),%xmm14
+       movaps  0xa0(%rsp),%xmm15
+       lea     0xb8(%rsp),%rsp
+.Ldec_key_epilogue:
+___
+$code.=<<___;
+       xor     %eax,%eax
+       ret
+.size  ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+
+.globl ${PREFIX}_encrypt
+.type  ${PREFIX}_encrypt,\@function,3
+.align 16
+${PREFIX}_encrypt:
+___
+$code.=<<___ if ($win64);
+       lea     -0xb8(%rsp),%rsp
+       movaps  %xmm6,0x10(%rsp)
+       movaps  %xmm7,0x20(%rsp)
+       movaps  %xmm8,0x30(%rsp)
+       movaps  %xmm9,0x40(%rsp)
+       movaps  %xmm10,0x50(%rsp)
+       movaps  %xmm11,0x60(%rsp)
+       movaps  %xmm12,0x70(%rsp)
+       movaps  %xmm13,0x80(%rsp)
+       movaps  %xmm14,0x90(%rsp)
+       movaps  %xmm15,0xa0(%rsp)
+.Lenc_body:
+___
+$code.=<<___;
+       movdqu  (%rdi),%xmm0
+       call    _vpaes_preheat
+       call    _vpaes_encrypt_core
+       movdqu  %xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+       movaps  0x10(%rsp),%xmm6
+       movaps  0x20(%rsp),%xmm7
+       movaps  0x30(%rsp),%xmm8
+       movaps  0x40(%rsp),%xmm9
+       movaps  0x50(%rsp),%xmm10
+       movaps  0x60(%rsp),%xmm11
+       movaps  0x70(%rsp),%xmm12
+       movaps  0x80(%rsp),%xmm13
+       movaps  0x90(%rsp),%xmm14
+       movaps  0xa0(%rsp),%xmm15
+       lea     0xb8(%rsp),%rsp
+.Lenc_epilogue:
+___
+$code.=<<___;
+       ret
+.size  ${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl ${PREFIX}_decrypt
+.type  ${PREFIX}_decrypt,\@function,3
+.align 16
+${PREFIX}_decrypt:
+___
+$code.=<<___ if ($win64);
+       lea     -0xb8(%rsp),%rsp
+       movaps  %xmm6,0x10(%rsp)
+       movaps  %xmm7,0x20(%rsp)
+       movaps  %xmm8,0x30(%rsp)
+       movaps  %xmm9,0x40(%rsp)
+       movaps  %xmm10,0x50(%rsp)
+       movaps  %xmm11,0x60(%rsp)
+       movaps  %xmm12,0x70(%rsp)
+       movaps  %xmm13,0x80(%rsp)
+       movaps  %xmm14,0x90(%rsp)
+       movaps  %xmm15,0xa0(%rsp)
+.Ldec_body:
+___
+$code.=<<___;
+       movdqu  (%rdi),%xmm0
+       call    _vpaes_preheat
+       call    _vpaes_decrypt_core
+       movdqu  %xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+       movaps  0x10(%rsp),%xmm6
+       movaps  0x20(%rsp),%xmm7
+       movaps  0x30(%rsp),%xmm8
+       movaps  0x40(%rsp),%xmm9
+       movaps  0x50(%rsp),%xmm10
+       movaps  0x60(%rsp),%xmm11
+       movaps  0x70(%rsp),%xmm12
+       movaps  0x80(%rsp),%xmm13
+       movaps  0x90(%rsp),%xmm14
+       movaps  0xa0(%rsp),%xmm15
+       lea     0xb8(%rsp),%rsp
+.Ldec_epilogue:
+___
+$code.=<<___;
+       ret
+.size  ${PREFIX}_decrypt,.-${PREFIX}_decrypt
+___
+{
+my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
+# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#                       size_t length, const AES_KEY *key,
+#                       unsigned char *ivp,const int enc);
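+#
+# For orientation, the loops below are plain CBC chaining over whole
+# 16-byte blocks (partial blocks are not handled).  A hedged C sketch,
+# where xor16/enc_block/dec_block are stand-ins for the pxor and
+# _vpaes_{en,de}crypt_core calls:
+#
+#     for (; length >= 16; length -= 16, inp += 16, out += 16)
+#         if (enc) {                      /* C[i] = E(P[i] ^ C[i-1]) */
+#             xor16(tmp, inp, ivec);
+#             enc_block(out, tmp, key);
+#             memcpy(ivec, out, 16);
+#         } else {                        /* P[i] = D(C[i]) ^ C[i-1] */
+#             memcpy(tmp, inp, 16);       /* keep C[i] as next IV */
+#             dec_block(out, inp, key);
+#             xor16(out, out, ivec);
+#             memcpy(ivec, tmp, 16);
+#         }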
+$code.=<<___;
+.globl ${PREFIX}_cbc_encrypt
+.type  ${PREFIX}_cbc_encrypt,\@function,6
+.align 16
+${PREFIX}_cbc_encrypt:
+       xchg    $key,$len
+___
+($len,$key)=($key,$len);
+$code.=<<___ if ($win64);
+       lea     -0xb8(%rsp),%rsp
+       movaps  %xmm6,0x10(%rsp)
+       movaps  %xmm7,0x20(%rsp)
+       movaps  %xmm8,0x30(%rsp)
+       movaps  %xmm9,0x40(%rsp)
+       movaps  %xmm10,0x50(%rsp)
+       movaps  %xmm11,0x60(%rsp)
+       movaps  %xmm12,0x70(%rsp)
+       movaps  %xmm13,0x80(%rsp)
+       movaps  %xmm14,0x90(%rsp)
+       movaps  %xmm15,0xa0(%rsp)
+.Lcbc_body:
+___
+$code.=<<___;
+       movdqu  ($ivp),%xmm6            # load IV
+       sub     $inp,$out
+       sub     \$16,$len
+       call    _vpaes_preheat
+       cmp     \$0,${enc}d
+       je      .Lcbc_dec_loop
+       jmp     .Lcbc_enc_loop
+.align 16
+.Lcbc_enc_loop:
+       movdqu  ($inp),%xmm0
+       pxor    %xmm6,%xmm0
+       call    _vpaes_encrypt_core
+       movdqa  %xmm0,%xmm6
+       movdqu  %xmm0,($out,$inp)
+       lea     16($inp),$inp
+       sub     \$16,$len
+       jnc     .Lcbc_enc_loop
+       jmp     .Lcbc_done
+.align 16
+.Lcbc_dec_loop:
+       movdqu  ($inp),%xmm0
+       movdqa  %xmm0,%xmm7
+       call    _vpaes_decrypt_core
+       pxor    %xmm6,%xmm0
+       movdqa  %xmm7,%xmm6
+       movdqu  %xmm0,($out,$inp)
+       lea     16($inp),$inp
+       sub     \$16,$len
+       jnc     .Lcbc_dec_loop
+.Lcbc_done:
+       movdqu  %xmm6,($ivp)            # save IV
+___
+$code.=<<___ if ($win64);
+       movaps  0x10(%rsp),%xmm6
+       movaps  0x20(%rsp),%xmm7
+       movaps  0x30(%rsp),%xmm8
+       movaps  0x40(%rsp),%xmm9
+       movaps  0x50(%rsp),%xmm10
+       movaps  0x60(%rsp),%xmm11
+       movaps  0x70(%rsp),%xmm12
+       movaps  0x80(%rsp),%xmm13
+       movaps  0x90(%rsp),%xmm14
+       movaps  0xa0(%rsp),%xmm15
+       lea     0xb8(%rsp),%rsp
+.Lcbc_epilogue:
+___
+$code.=<<___;
+       ret
+.size  ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+}
+$code.=<<___;
+##
+##  _vpaes_preheat
+##
+##  Points %r10 into the constant table with a RIP-relative lea (so
+##  the code stays -fPIC friendly) and preloads %xmm9-%xmm15 with
+##  the constants noted below.
+##
+.type  _vpaes_preheat,\@abi-omnipotent
+.align 16
+_vpaes_preheat:
+       lea     .Lk_s0F(%rip), %r10
+       movdqa  -0x20(%r10), %xmm10     # .Lk_inv
+       movdqa  -0x10(%r10), %xmm11     # .Lk_inv+16
+       movdqa  0x00(%r10), %xmm9       # .Lk_s0F
+       movdqa  0x30(%r10), %xmm13      # .Lk_sb1
+       movdqa  0x40(%r10), %xmm12      # .Lk_sb1+16
+       movdqa  0x50(%r10), %xmm15      # .Lk_sb2
+       movdqa  0x60(%r10), %xmm14      # .Lk_sb2+16
+       ret
+.size  _vpaes_preheat,.-_vpaes_preheat
+########################################################
+##                                                    ##
+##                     Constants                      ##
+##                                                    ##
+########################################################
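+## Every 16-byte row below is a pshufb lookup table: for each byte of
+## the index register pshufb selects one of these 16 bytes (indices
+## here are nibbles, 0..15).  .quad pairs are little-endian, so lane 0
+## is the low byte of the first quad.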
+.type  _vpaes_consts,\@object
+.align 64
+_vpaes_consts:
+.Lk_inv:       # inv, inva
+       .quad   0x0E05060F0D080180, 0x040703090A0B0C02
+       .quad   0x01040A060F0B0780, 0x030D0E0C02050809
+
+.Lk_s0F:       # s0F
+       .quad   0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
+
+.Lk_ipt:       # input transform (lo, hi)
+       .quad   0xC2B2E8985A2A7000, 0xCABAE09052227808
+       .quad   0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+
+.Lk_sb1:       # sb1u, sb1t
+       .quad   0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+       .quad   0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+.Lk_sb2:       # sb2u, sb2t
+       .quad   0xE27A93C60B712400, 0x5EB7E955BC982FCD
+       .quad   0x69EB88400AE12900, 0xC2A163C8AB82234A
+.Lk_sbo:       # sbou, sbot
+       .quad   0xD0D26D176FBDC700, 0x15AABF7AC502A878
+       .quad   0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+
+.Lk_mc_forward:        # mc_forward
+       .quad   0x0407060500030201, 0x0C0F0E0D080B0A09
+       .quad   0x080B0A0904070605, 0x000302010C0F0E0D
+       .quad   0x0C0F0E0D080B0A09, 0x0407060500030201
+       .quad   0x000302010C0F0E0D, 0x080B0A0904070605
+
+.Lk_mc_backward: # mc_backward
+       .quad   0x0605040702010003, 0x0E0D0C0F0A09080B
+       .quad   0x020100030E0D0C0F, 0x0A09080B06050407
+       .quad   0x0E0D0C0F0A09080B, 0x0605040702010003
+       .quad   0x0A09080B06050407, 0x020100030E0D0C0F
+
+.Lk_sr:                # sr
+       .quad   0x0706050403020100, 0x0F0E0D0C0B0A0908
+       .quad   0x030E09040F0A0500, 0x0B06010C07020D08
+       .quad   0x0F060D040B020900, 0x070E050C030A0108
+       .quad   0x0B0E0104070A0D00, 0x0306090C0F020508
+
+.Lk_rcon:      # rcon
+       .quad   0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+.Lk_s63:       # s63: all equal to 0x63 transformed
+       .quad   0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
+
+.Lk_opt:       # output transform
+       .quad   0xFF9F4929D6B66000, 0xF7974121DEBE6808
+       .quad   0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+
+.Lk_deskew:    # deskew tables: inverts the sbox's "skew"
+       .quad   0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+       .quad   0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+.Lk_dksd:      # decryption key schedule: invskew x*D
+       .quad   0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+       .quad   0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+.Lk_dksb:      # decryption key schedule: invskew x*B
+       .quad   0x9A4FCA1F8550D500, 0x03D653861CC94C99
+       .quad   0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+.Lk_dkse:      # decryption key schedule: invskew x*E + 0x63
+       .quad   0xD5031CCA1FC9D600, 0x53859A4C994F5086
+       .quad   0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+.Lk_dks9:      # decryption key schedule: invskew x*9
+       .quad   0xB6116FC87ED9A700, 0x4AED933482255BFC
+       .quad   0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+.Lk_dipt:      # decryption input transform
+       .quad   0x0F505B040B545F00, 0x154A411E114E451A
+       .quad   0x86E383E660056500, 0x12771772F491F194
+
+.Lk_dsb9:      # decryption sbox output *9*u, *9*t
+       .quad   0x851C03539A86D600, 0xCAD51F504F994CC9
+       .quad   0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+.Lk_dsbd:      # decryption sbox output *D*u, *D*t
+       .quad   0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+       .quad   0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+.Lk_dsbb:      # decryption sbox output *B*u, *B*t
+       .quad   0xD022649296B44200, 0x602646F6B0F2D404
+       .quad   0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+.Lk_dsbe:      # decryption sbox output *E*u, *E*t
+       .quad   0x46F2929626D4D000, 0x2242600464B4F6B0
+       .quad   0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+.Lk_dsbo:      # decryption sbox final output
+       .quad   0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+       .quad   0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+.asciz "Vector Permutation AES for x86_64, Mike Hamburg (Stanford University)"
+.align 64
+.size  _vpaes_consts,.-_vpaes_consts
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#              CONTEXT *context,DISPATCHER_CONTEXT *disp)
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
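+
+# Roughly, in hedged C terms; HandlerData carries the {body, epilogue}
+# RVA pair registered in the .xdata section below:
+#
+#     EXCEPTION_DISPOSITION se_handler(EXCEPTION_RECORD *rec, ULONG64 frame,
+#                                      CONTEXT *ctx, DISPATCHER_CONTEXT *disp)
+#     {
+#         DWORD *rva = (DWORD *)disp->HandlerData;
+#         if (ctx->Rip >= disp->ImageBase + rva[0] &&
+#             ctx->Rip <  disp->ImageBase + rva[1]) {
+#             /* inside the body: xmm6-15 were saved at rsp+0x10 */
+#             memcpy(&ctx->Xmm6, (char *)ctx->Rsp + 0x10, 10*16);
+#             ctx->Rsp += 0xb8;            /* drop our stack frame */
+#         }
+#         /* recover the rdi/rsi pushed by the Win64 entry thunk, then
+#            call RtlVirtualUnwind to continue unwinding the caller */
+#         return ExceptionContinueSearch;  /* == 1, as %eax below */
+#     }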
+
+$code.=<<___;
+.extern        __imp_RtlVirtualUnwind
+.type  se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lin_prologue
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lin_prologue
+
+       lea     16(%rax),%rsi           # %xmm save area
+       lea     512($context),%rdi      # &context.Xmm6
+       mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
+       .long   0xa548f3fc              # cld; rep movsq
+       lea     0xb8(%rax),%rax         # adjust stack pointer
+
+.Lin_prologue:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+       mov     40($disp),%rdi          # disp->ContextRecord
+       mov     $context,%rsi           # context
+       mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
+       .long   0xa548f3fc              # cld; rep movsq
+
+       mov     $disp,%rsi
+       xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
+       mov     8(%rsi),%rdx            # arg2, disp->ImageBase
+       mov     0(%rsi),%r8             # arg3, disp->ControlPc
+       mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
+       mov     40(%rsi),%r10           # disp->ContextRecord
+       lea     56(%rsi),%r11           # &disp->HandlerData
+       lea     24(%rsi),%r12           # &disp->EstablisherFrame
+       mov     %r10,32(%rsp)           # arg5
+       mov     %r11,40(%rsp)           # arg6
+       mov     %r12,48(%rsp)           # arg7
+       mov     %rcx,56(%rsp)           # arg8, (NULL)
+       call    *__imp_RtlVirtualUnwind(%rip)
+
+       mov     \$1,%eax                # ExceptionContinueSearch
+       add     \$64,%rsp
+       popfq
+       pop     %r15
+       pop     %r14
+       pop     %r13
+       pop     %r12
+       pop     %rbp
+       pop     %rbx
+       pop     %rdi
+       pop     %rsi
+       ret
+.size  se_handler,.-se_handler
+
+.section       .pdata
+.align 4
+       .rva    .LSEH_begin_${PREFIX}_set_encrypt_key
+       .rva    .LSEH_end_${PREFIX}_set_encrypt_key
+       .rva    .LSEH_info_${PREFIX}_set_encrypt_key
+
+       .rva    .LSEH_begin_${PREFIX}_set_decrypt_key
+       .rva    .LSEH_end_${PREFIX}_set_decrypt_key
+       .rva    .LSEH_info_${PREFIX}_set_decrypt_key
+
+       .rva    .LSEH_begin_${PREFIX}_encrypt
+       .rva    .LSEH_end_${PREFIX}_encrypt
+       .rva    .LSEH_info_${PREFIX}_encrypt
+
+       .rva    .LSEH_begin_${PREFIX}_decrypt
+       .rva    .LSEH_end_${PREFIX}_decrypt
+       .rva    .LSEH_info_${PREFIX}_decrypt
+
+       .rva    .LSEH_begin_${PREFIX}_cbc_encrypt
+       .rva    .LSEH_end_${PREFIX}_cbc_encrypt
+       .rva    .LSEH_info_${PREFIX}_cbc_encrypt
+
+.section       .xdata
+.align 8
+.LSEH_info_${PREFIX}_set_encrypt_key:
+       .byte   9,0,0,0
+       .rva    se_handler
+       .rva    .Lenc_key_body,.Lenc_key_epilogue       # HandlerData[]
+.LSEH_info_${PREFIX}_set_decrypt_key:
+       .byte   9,0,0,0
+       .rva    se_handler
+       .rva    .Ldec_key_body,.Ldec_key_epilogue       # HandlerData[]
+.LSEH_info_${PREFIX}_encrypt:
+       .byte   9,0,0,0
+       .rva    se_handler
+       .rva    .Lenc_body,.Lenc_epilogue               # HandlerData[]
+.LSEH_info_${PREFIX}_decrypt:
+       .byte   9,0,0,0
+       .rva    se_handler
+       .rva    .Ldec_body,.Ldec_epilogue               # HandlerData[]
+.LSEH_info_${PREFIX}_cbc_encrypt:
+       .byte   9,0,0,0
+       .rva    se_handler
+       .rva    .Lcbc_body,.Lcbc_epilogue               # HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/crypto/perlasm/x86_64-xlate.pl b/crypto/perlasm/x86_64-xlate.pl
index fcb0ce0..cafacb8 100755 (executable)
--- a/crypto/perlasm/x86_64-xlate.pl
@@ -666,14 +666,14 @@ my %regrm = (     "%eax"=>0, "%ecx"=>1, "%edx"=>2, "%ebx"=>3,
 my $movq = sub {       # elderly gas can't handle inter-register movq
   my $arg = shift;
   my @opcode=(0x66);
-    if ($arg =~ /%xmm([0-9]+),%r(\w+)/) {
+    if ($arg =~ /%xmm([0-9]+),\s*%r(\w+)/) {
        my ($src,$dst)=($1,$2);
        if ($dst !~ /[0-9]+/)   { $dst = $regrm{"%e$dst"}; }
        rex(\@opcode,$src,$dst,0x8);
        push @opcode,0x0f,0x7e;
        push @opcode,0xc0|(($src&7)<<3)|($dst&7);       # ModR/M
        @opcode;
-    } elsif ($arg =~ /%r(\w+),%xmm([0-9]+)/) {
+    } elsif ($arg =~ /%r(\w+),\s*%xmm([0-9]+)/) {
        my ($src,$dst)=($2,$1);
        if ($dst !~ /[0-9]+/)   { $dst = $regrm{"%e$dst"}; }
        rex(\@opcode,$src,$dst,0x8);
@@ -686,7 +686,7 @@ my $movq = sub {    # elderly gas can't handle inter-register movq
 };
 
 my $pextrd = sub {
-    if (shift =~ /\$([0-9]+),%xmm([0-9]+),(%\w+)/) {
+    if (shift =~ /\$([0-9]+),\s*%xmm([0-9]+),\s*(%\w+)/) {
       my @opcode=(0x66);
        $imm=$1;
        $src=$2;
@@ -704,7 +704,7 @@ my $pextrd = sub {
 };
 
 my $pinsrd = sub {
-    if (shift =~ /\$([0-9]+),(%\w+),%xmm([0-9]+)/) {
+    if (shift =~ /\$([0-9]+),\s*(%\w+),\s*%xmm([0-9]+)/) {
       my @opcode=(0x66);
        $imm=$1;
        $src=$2;
@@ -722,7 +722,7 @@ my $pinsrd = sub {
 };
 
 my $pshufb = sub {
-    if (shift =~ /%xmm([0-9]+),%xmm([0-9]+)/) {
+    if (shift =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
       my @opcode=(0x66);
        rex(\@opcode,$2,$1);
        push @opcode,0x0f,0x38,0x00;
@@ -734,7 +734,7 @@ my $pshufb = sub {
 };
 
 my $palignr = sub {
-    if (shift =~ /\$([0-9]+),%xmm([0-9]+),%xmm([0-9]+)/) {
+    if (shift =~ /\$([0-9]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
       my @opcode=(0x66);
        rex(\@opcode,$3,$2);
        push @opcode,0x0f,0x3a,0x0f;