1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # March, May, June 2010
11 #
12 # The module implements the "4-bit" GCM GHASH function and the underlying
13 # single multiplication operation in GF(2^128). "4-bit" means that it
14 # uses a 256-byte per-key table [+64/128 bytes of fixed table]. It has two
15 # code paths: vanilla x86 and vanilla SSE. The former will be executed on
16 # 486 and Pentium, the latter on all others. SSE GHASH features the so-called
17 # "528B" variant of the "4-bit" method utilizing an additional 256+16 bytes
18 # of per-key storage [+512 bytes of shared table]. Performance results
19 # are for the streamed GHASH subroutine and are expressed in cycles per
20 # processed byte, less is better:
21 #
22 #               gcc 2.95.3(*)   SSE assembler   x86 assembler
23 #
24 # Pentium       105/111(**)     -               50
25 # PIII          68 /75          12.2            24
26 # P4            125/125         17.8            84(***)
27 # Opteron       66 /70          10.1            30
28 # Core2         54 /67          8.4             18
29 # Atom          105/105         16.8            53
30 # VIA Nano      69 /71          13.0            27
31 #
32 # (*)   gcc 3.4.x was observed to generate a few percent slower code,
33 #       which is one of the reasons why the 2.95.3 results were chosen;
34 #       another reason is the lack of 3.4.x results for older CPUs;
35 #       comparison with SSE results is not completely fair, because C
36 #       results are for vanilla "256B" implementation, while
37 #       assembler results are for "528B";-)
38 # (**)  second number is result for code compiled with -fPIC flag,
39 #       which is actually more relevant, because assembler code is
40 #       position-independent;
41 # (***) see comment in non-MMX routine for further details;
42 #
43 # To summarize, it's >2-5 times faster than gcc-generated code. To
44 # anchor it to something else, SHA1 assembler processes one byte in
45 # ~7 cycles on contemporary x86 cores. As for the choice of MMX/SSE
46 # in particular, see the comment at the end of the file...
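#
# For orientation, the "4-bit" method referenced throughout corresponds
# roughly to the following C-style sketch (an illustrative outline only;
# see gcm_gmult_4bit in gcm128.c for the authoritative version). Htable[]
# holds H multiplied by each of the 16 possible nibble values and
# rem_4bit[] holds the 16 possible reduction terms:
#
#       Z = Htable[Xi[15] & 0xf];
#       for (each remaining nibble n of Xi,             /* bytes 15..0,  */
#            low nibble before high) {                  /* 31 nibbles    */
#               rem  = Z.lo & 0xf;
#               Z.lo = (Z.hi << 60) | (Z.lo >> 4);      /* Z >>= 4       */
#               Z.hi = (Z.hi >> 4) ^ rem_4bit[rem];     /* reduce        */
#               Z.hi ^= Htable[n].hi;                   /* Z ^= n*H      */
#               Z.lo ^= Htable[n].lo;
#       }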
47
48 # May 2010
49 #
50 # Add PCLMULQDQ version performing at 2.10 cycles per processed byte.
51 # The question is, how close is it to the theoretical limit? The pclmulqdq
52 # instruction latency appears to be 14 cycles and there can't be more
53 # than 2 of them executing at any given time. This means that a single
54 # Karatsuba multiplication would take 28 cycles *plus* a few cycles for
55 # pre- and post-processing. Then the multiplication has to be followed by
56 # modulo-reduction. Given that the aggregated reduction method [see
57 # "Carry-less Multiplication and Its Usage for Computing the GCM Mode"
58 # white paper by Intel] allows you to perform the reduction only once in
59 # a while, we can assume that asymptotic performance can be estimated
60 # as (28+Tmod/Naggr)/16, where Tmod is the time to perform the reduction
61 # and Naggr is the aggregation factor.
62 #
63 # Before we proceed to this implementation, let's have a closer look at
64 # the best-performing code suggested by Intel in their white paper.
65 # By tracing inter-register dependencies Tmod is estimated as ~19
66 # cycles and the Naggr chosen by Intel is 4, resulting in 2.05 cycles per
67 # processed byte. As implied, this is a quite optimistic estimate,
68 # because it does not account for Karatsuba pre- and post-processing,
69 # which for a single multiplication is ~5 cycles. Unfortunately Intel
70 # does not provide performance data for GHASH alone. But benchmarking
71 # AES_GCM_encrypt ripped out of Fig. 15 of the white paper with aadt
72 # alone resulted in 2.46 cycles per byte out of a 16KB buffer. Note that
73 # the result even accounts for pre-computing the powers of the hash
74 # key H, but their share is negligible at 16KB buffer size.
75 #
76 # Moving on to the implementation in question. Tmod is estimated as
77 # ~13 cycles and Naggr is 2, giving asymptotic performance of ...
78 # 2.16. How is it possible that measured performance is better than
79 # the optimistic theoretical estimate? There is one thing Intel failed
80 # to recognize. By serializing GHASH with CTR in the same subroutine,
81 # the former's performance is really limited by the above
82 # (Tmul + Tmod/Naggr) equation. But if the GHASH procedure is detached,
83 # the modulo-reduction can be interleaved with Naggr-1 multiplications
84 # at instruction level and under ideal conditions even disappear from
85 # the equation. So the optimistic theoretical estimate for this
86 # implementation is ... 28/16=1.75, and not 2.16. Well, it's probably
87 # way too optimistic, at least for such a small Naggr. I'd argue that
88 # (28+Tproc/Naggr)/16, where Tproc is the time required for Karatsuba
89 # pre- and post-processing, is a more realistic estimate. In this case
90 # it gives ... 1.91 cycles. Or in other words, depending on how well we can
91 # interleave reduction with one of the two multiplications, the performance
92 # should be between 1.91 and 2.16. As already mentioned, this implementation
93 # processes one byte out of an 8KB buffer in 2.10 cycles, while the x86_64
94 # counterpart does it in 2.02. x86_64 performance is better, because its
95 # larger register bank allows better interleaving of reduction and multiplication.
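#
# For reference, the arithmetic behind the estimates quoted above
# (a recap of the formulas already given, not new measurements):
#
#       Intel's code:           (28 + 19/4)/16 ~= 2.05 cycles/byte
#       this implementation:    (28 + 13/2)/16 ~= 2.16 cycles/byte
#       ideal interleaving:      28/16          = 1.75 cycles/byte
#       (28+Tproc/Naggr)/16:    (28 +  5/2)/16 ~= 1.91 cycles/byte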
96 #
97 # Does it make sense to increase Naggr? To start with, it's virtually
98 # impossible in 32-bit mode, because of the limited register bank
99 # capacity. Otherwise the improvement has to be weighed against slower
100 # setup, as well as the increase in code size and complexity. As even
101 # the optimistic estimate doesn't promise a 30% performance improvement,
102 # there are currently no plans to increase Naggr.
103 #
104 # Special thanks to David Woodhouse <dwmw2@infradead.org> for
105 # providing access to a Westmere-based system on behalf of Intel
106 # Open Source Technology Centre.
107
108 # January 2010
109 #
110 # Tweaked to optimize transitions between integer and FP operations
111 # on the same XMM register. The PCLMULQDQ subroutine was measured to process
112 # one byte in 2.07 cycles on Sandy Bridge, and in 2.12 on Westmere.
113 # The minor regression on Westmere is outweighed by the ~15% improvement
114 # on Sandy Bridge. Strangely enough, an attempt to modify the 64-bit code in
115 # a similar manner resulted in almost 20% degradation on Sandy Bridge,
116 # where the original 64-bit code processes one byte in 1.95 cycles.
117
118 #####################################################################
119 # For reference, AMD Bulldozer processes one byte in 1.98 cycles in
120 # 32-bit mode and 1.89 in 64-bit.
121
122 # February 2013
123 #
124 # Overhaul: aggregate Karatsuba post-processing, improve ILP in
125 # reduction_alg9. Resulting performance is 1.96 cycles per byte on
126 # Westmere, 1.95 - on Sandy/Ivy Bridge, 1.76 - on Bulldozer.
127
128 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
129 push(@INC,"${dir}","${dir}../../perlasm");
130 require "x86asm.pl";
131
132 &asm_init($ARGV[0],"ghash-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");
133
134 $sse2=0;
135 for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
136
137 ($Zhh,$Zhl,$Zlh,$Zll) = ("ebp","edx","ecx","ebx");
138 $inp  = "edi";
139 $Htbl = "esi";
140 \f
141 $unroll = 0;    # Affects x86 loop. Folded loop performs ~7% worse
142                 # than unrolled, which has to be weighed against the
143                 # 2.5x x86-specific code size reduction.
144
145 sub x86_loop {
146     my $off = shift;
147     my $rem = "eax";
148
149         &mov    ($Zhh,&DWP(4,$Htbl,$Zll));
150         &mov    ($Zhl,&DWP(0,$Htbl,$Zll));
151         &mov    ($Zlh,&DWP(12,$Htbl,$Zll));
152         &mov    ($Zll,&DWP(8,$Htbl,$Zll));
153         &xor    ($rem,$rem);    # avoid partial register stalls on PIII
154
155         # shrd practically kills P4, 2.5x deterioration, but P4 has
156         # an MMX code-path to execute. shrd runs a tad faster [than twice
157         # the shifts, moves and ors] on pre-MMX Pentium (as well as
158         # PIII and Core2), *but* minimizes code size, spares a register
159         # and thus allows the loop to be folded...
160         if (!$unroll) {
161         my $cnt = $inp;
162         &mov    ($cnt,15);
163         &jmp    (&label("x86_loop"));
164         &set_label("x86_loop",16);
165             for($i=1;$i<=2;$i++) {
166                 &mov    (&LB($rem),&LB($Zll));
167                 &shrd   ($Zll,$Zlh,4);
168                 &and    (&LB($rem),0xf);
169                 &shrd   ($Zlh,$Zhl,4);
170                 &shrd   ($Zhl,$Zhh,4);
171                 &shr    ($Zhh,4);
172                 &xor    ($Zhh,&DWP($off+16,"esp",$rem,4));
173
174                 &mov    (&LB($rem),&BP($off,"esp",$cnt));
175                 if ($i&1) {
176                         &and    (&LB($rem),0xf0);
177                 } else {
178                         &shl    (&LB($rem),4);
179                 }
180
181                 &xor    ($Zll,&DWP(8,$Htbl,$rem));
182                 &xor    ($Zlh,&DWP(12,$Htbl,$rem));
183                 &xor    ($Zhl,&DWP(0,$Htbl,$rem));
184                 &xor    ($Zhh,&DWP(4,$Htbl,$rem));
185
186                 if ($i&1) {
187                         &dec    ($cnt);
188                         &js     (&label("x86_break"));
189                 } else {
190                         &jmp    (&label("x86_loop"));
191                 }
192             }
193         &set_label("x86_break",16);
194         } else {
195             for($i=1;$i<32;$i++) {
196                 &comment($i);
197                 &mov    (&LB($rem),&LB($Zll));
198                 &shrd   ($Zll,$Zlh,4);
199                 &and    (&LB($rem),0xf);
200                 &shrd   ($Zlh,$Zhl,4);
201                 &shrd   ($Zhl,$Zhh,4);
202                 &shr    ($Zhh,4);
203                 &xor    ($Zhh,&DWP($off+16,"esp",$rem,4));
204
205                 if ($i&1) {
206                         &mov    (&LB($rem),&BP($off+15-($i>>1),"esp"));
207                         &and    (&LB($rem),0xf0);
208                 } else {
209                         &mov    (&LB($rem),&BP($off+15-($i>>1),"esp"));
210                         &shl    (&LB($rem),4);
211                 }
212
213                 &xor    ($Zll,&DWP(8,$Htbl,$rem));
214                 &xor    ($Zlh,&DWP(12,$Htbl,$rem));
215                 &xor    ($Zhl,&DWP(0,$Htbl,$rem));
216                 &xor    ($Zhh,&DWP(4,$Htbl,$rem));
217             }
218         }
219         &bswap  ($Zll);
220         &bswap  ($Zlh);
221         &bswap  ($Zhl);
222         if (!$x86only) {
223                 &bswap  ($Zhh);
224         } else {
225                 &mov    ("eax",$Zhh);
226                 &bswap  ("eax");
227                 &mov    ($Zhh,"eax");
228         }
229 }
230
231 if ($unroll) {
232     &function_begin_B("_x86_gmult_4bit_inner");
233         &x86_loop(4);
234         &ret    ();
235     &function_end_B("_x86_gmult_4bit_inner");
236 }
237
238 sub deposit_rem_4bit {
239     my $bias = shift;
240
241         &mov    (&DWP($bias+0, "esp"),0x0000<<16);
242         &mov    (&DWP($bias+4, "esp"),0x1C20<<16);
243         &mov    (&DWP($bias+8, "esp"),0x3840<<16);
244         &mov    (&DWP($bias+12,"esp"),0x2460<<16);
245         &mov    (&DWP($bias+16,"esp"),0x7080<<16);
246         &mov    (&DWP($bias+20,"esp"),0x6CA0<<16);
247         &mov    (&DWP($bias+24,"esp"),0x48C0<<16);
248         &mov    (&DWP($bias+28,"esp"),0x54E0<<16);
249         &mov    (&DWP($bias+32,"esp"),0xE100<<16);
250         &mov    (&DWP($bias+36,"esp"),0xFD20<<16);
251         &mov    (&DWP($bias+40,"esp"),0xD940<<16);
252         &mov    (&DWP($bias+44,"esp"),0xC560<<16);
253         &mov    (&DWP($bias+48,"esp"),0x9180<<16);
254         &mov    (&DWP($bias+52,"esp"),0x8DA0<<16);
255         &mov    (&DWP($bias+56,"esp"),0xA9C0<<16);
256         &mov    (&DWP($bias+60,"esp"),0xB5E0<<16);
257 }
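
# The sixteen words deposited above are the "4-bit" reduction constants,
# one for each possible value of the nibble shifted out of the bottom of
# Z. Only the top half of the corresponding 64-bit rem_4bit[] entry is
# ever non-zero, so each is kept here as a single 32-bit word (the 16-bit
# constant shifted left by 16) and XORed straight into the most
# significant dword of Z ($Zhh) in x86_loop. [Explanatory note; see also
# the static rem_4bit table at the end of the file.]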
258 \f
259 $suffix = $x86only ? "" : "_x86";
260
261 &function_begin("gcm_gmult_4bit".$suffix);
262         &stack_push(16+4+1);                    # +1 for stack alignment
263         &mov    ($inp,&wparam(0));              # load Xi
264         &mov    ($Htbl,&wparam(1));             # load Htable
265
266         &mov    ($Zhh,&DWP(0,$inp));            # load Xi[16]
267         &mov    ($Zhl,&DWP(4,$inp));
268         &mov    ($Zlh,&DWP(8,$inp));
269         &mov    ($Zll,&DWP(12,$inp));
270
271         &deposit_rem_4bit(16);
272
273         &mov    (&DWP(0,"esp"),$Zhh);           # copy Xi[16] on stack
274         &mov    (&DWP(4,"esp"),$Zhl);
275         &mov    (&DWP(8,"esp"),$Zlh);
276         &mov    (&DWP(12,"esp"),$Zll);
277         &shr    ($Zll,20);
278         &and    ($Zll,0xf0);
279
280         if ($unroll) {
281                 &call   ("_x86_gmult_4bit_inner");
282         } else {
283                 &x86_loop(0);
284                 &mov    ($inp,&wparam(0));
285         }
286
287         &mov    (&DWP(12,$inp),$Zll);
288         &mov    (&DWP(8,$inp),$Zlh);
289         &mov    (&DWP(4,$inp),$Zhl);
290         &mov    (&DWP(0,$inp),$Zhh);
291         &stack_pop(16+4+1);
292 &function_end("gcm_gmult_4bit".$suffix);
293
294 &function_begin("gcm_ghash_4bit".$suffix);
295         &stack_push(16+4+1);                    # +1 for 64-bit alignment
296         &mov    ($Zll,&wparam(0));              # load Xi
297         &mov    ($Htbl,&wparam(1));             # load Htable
298         &mov    ($inp,&wparam(2));              # load in
299         &mov    ("ecx",&wparam(3));             # load len
300         &add    ("ecx",$inp);
301         &mov    (&wparam(3),"ecx");
302
303         &mov    ($Zhh,&DWP(0,$Zll));            # load Xi[16]
304         &mov    ($Zhl,&DWP(4,$Zll));
305         &mov    ($Zlh,&DWP(8,$Zll));
306         &mov    ($Zll,&DWP(12,$Zll));
307
308         &deposit_rem_4bit(16);
309
310     &set_label("x86_outer_loop",16);
311         &xor    ($Zll,&DWP(12,$inp));           # xor with input
312         &xor    ($Zlh,&DWP(8,$inp));
313         &xor    ($Zhl,&DWP(4,$inp));
314         &xor    ($Zhh,&DWP(0,$inp));
315         &mov    (&DWP(12,"esp"),$Zll);          # dump it on stack
316         &mov    (&DWP(8,"esp"),$Zlh);
317         &mov    (&DWP(4,"esp"),$Zhl);
318         &mov    (&DWP(0,"esp"),$Zhh);
319
320         &shr    ($Zll,20);
321         &and    ($Zll,0xf0);
322
323         if ($unroll) {
324                 &call   ("_x86_gmult_4bit_inner");
325         } else {
326                 &x86_loop(0);
327                 &mov    ($inp,&wparam(2));
328         }
329         &lea    ($inp,&DWP(16,$inp));
330         &cmp    ($inp,&wparam(3));
331         &mov    (&wparam(2),$inp)       if (!$unroll);
332         &jb     (&label("x86_outer_loop"));
333
334         &mov    ($inp,&wparam(0));      # load Xi
335         &mov    (&DWP(12,$inp),$Zll);
336         &mov    (&DWP(8,$inp),$Zlh);
337         &mov    (&DWP(4,$inp),$Zhl);
338         &mov    (&DWP(0,$inp),$Zhh);
339         &stack_pop(16+4+1);
340 &function_end("gcm_ghash_4bit".$suffix);
341 \f
342 if (!$x86only) {{{
343
344 &static_label("rem_4bit");
345
346 if (!$sse2) {{  # pure-MMX "May" version...
347
348 $S=12;          # shift factor for rem_4bit
349
350 &function_begin_B("_mmx_gmult_4bit_inner");
351 # MMX version performs 3.5 times better on P4 (see comment in non-MMX
352 # routine for further details), 100% better on Opteron, ~70% better
353 # on Core2 and PIII... In other words effort is considered to be well
354 # spent... Since initial release the loop was unrolled in order to
355 # "liberate" register previously used as loop counter. Instead it's
356 # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
357 # The path involves move of Z.lo from MMX to integer register,
358 # effective address calculation and finally merge of value to Z.hi.
359 # Reference to rem_4bit is scheduled so late that I had to >>4
360 # rem_4bit elements. This resulted in a 20-45% improvement
361 # on contemporary µ-archs.
362 {
363     my $cnt;
364     my $rem_4bit = "eax";
365     my @rem = ($Zhh,$Zll);
366     my $nhi = $Zhl;
367     my $nlo = $Zlh;
368
369     my ($Zlo,$Zhi) = ("mm0","mm1");
370     my $tmp = "mm2";
371
372         &xor    ($nlo,$nlo);    # avoid partial register stalls on PIII
373         &mov    ($nhi,$Zll);
374         &mov    (&LB($nlo),&LB($nhi));
375         &shl    (&LB($nlo),4);
376         &and    ($nhi,0xf0);
377         &movq   ($Zlo,&QWP(8,$Htbl,$nlo));
378         &movq   ($Zhi,&QWP(0,$Htbl,$nlo));
379         &movd   ($rem[0],$Zlo);
380
381         for ($cnt=28;$cnt>=-2;$cnt--) {
382             my $odd = $cnt&1;
383             my $nix = $odd ? $nlo : $nhi;
384
385                 &shl    (&LB($nlo),4)                   if ($odd);
386                 &psrlq  ($Zlo,4);
387                 &movq   ($tmp,$Zhi);
388                 &psrlq  ($Zhi,4);
389                 &pxor   ($Zlo,&QWP(8,$Htbl,$nix));
390                 &mov    (&LB($nlo),&BP($cnt/2,$inp))    if (!$odd && $cnt>=0);
391                 &psllq  ($tmp,60);
392                 &and    ($nhi,0xf0)                     if ($odd);
393                 &pxor   ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
394                 &and    ($rem[0],0xf);
395                 &pxor   ($Zhi,&QWP(0,$Htbl,$nix));
396                 &mov    ($nhi,$nlo)                     if (!$odd && $cnt>=0);
397                 &movd   ($rem[1],$Zlo);
398                 &pxor   ($Zlo,$tmp);
399
400                 push    (@rem,shift(@rem));             # "rotate" registers
401         }
402
403         &mov    ($inp,&DWP(4,$rem_4bit,$rem[1],8));     # last rem_4bit[rem]
404
405         &psrlq  ($Zlo,32);      # lower part of Zlo is already there
406         &movd   ($Zhl,$Zhi);
407         &psrlq  ($Zhi,32);
408         &movd   ($Zlh,$Zlo);
409         &movd   ($Zhh,$Zhi);
410         &shl    ($inp,4);       # compensate for rem_4bit[i] being >>4
411
412         &bswap  ($Zll);
413         &bswap  ($Zhl);
414         &bswap  ($Zlh);
415         &xor    ($Zhh,$inp);
416         &bswap  ($Zhh);
417
418         &ret    ();
419 }
420 &function_end_B("_mmx_gmult_4bit_inner");
421
422 &function_begin("gcm_gmult_4bit_mmx");
423         &mov    ($inp,&wparam(0));      # load Xi
424         &mov    ($Htbl,&wparam(1));     # load Htable
425
426         &call   (&label("pic_point"));
427         &set_label("pic_point");
428         &blindpop("eax");
429         &lea    ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));
430
431         &movz   ($Zll,&BP(15,$inp));
432
433         &call   ("_mmx_gmult_4bit_inner");
434
435         &mov    ($inp,&wparam(0));      # load Xi
436         &emms   ();
437         &mov    (&DWP(12,$inp),$Zll);
438         &mov    (&DWP(4,$inp),$Zhl);
439         &mov    (&DWP(8,$inp),$Zlh);
440         &mov    (&DWP(0,$inp),$Zhh);
441 &function_end("gcm_gmult_4bit_mmx");
442 \f
443 # Streamed version performs 20% better on P4, 7% on Opteron,
444 # 10% on Core2 and PIII...
445 &function_begin("gcm_ghash_4bit_mmx");
446         &mov    ($Zhh,&wparam(0));      # load Xi
447         &mov    ($Htbl,&wparam(1));     # load Htable
448         &mov    ($inp,&wparam(2));      # load in
449         &mov    ($Zlh,&wparam(3));      # load len
450
451         &call   (&label("pic_point"));
452         &set_label("pic_point");
453         &blindpop("eax");
454         &lea    ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));
455
456         &add    ($Zlh,$inp);
457         &mov    (&wparam(3),$Zlh);      # len to point at the end of input
458         &stack_push(4+1);               # +1 for stack alignment
459
460         &mov    ($Zll,&DWP(12,$Zhh));   # load Xi[16]
461         &mov    ($Zhl,&DWP(4,$Zhh));
462         &mov    ($Zlh,&DWP(8,$Zhh));
463         &mov    ($Zhh,&DWP(0,$Zhh));
464         &jmp    (&label("mmx_outer_loop"));
465
466     &set_label("mmx_outer_loop",16);
467         &xor    ($Zll,&DWP(12,$inp));
468         &xor    ($Zhl,&DWP(4,$inp));
469         &xor    ($Zlh,&DWP(8,$inp));
470         &xor    ($Zhh,&DWP(0,$inp));
471         &mov    (&wparam(2),$inp);
472         &mov    (&DWP(12,"esp"),$Zll);
473         &mov    (&DWP(4,"esp"),$Zhl);
474         &mov    (&DWP(8,"esp"),$Zlh);
475         &mov    (&DWP(0,"esp"),$Zhh);
476
477         &mov    ($inp,"esp");
478         &shr    ($Zll,24);
479
480         &call   ("_mmx_gmult_4bit_inner");
481
482         &mov    ($inp,&wparam(2));
483         &lea    ($inp,&DWP(16,$inp));
484         &cmp    ($inp,&wparam(3));
485         &jb     (&label("mmx_outer_loop"));
486
487         &mov    ($inp,&wparam(0));      # load Xi
488         &emms   ();
489         &mov    (&DWP(12,$inp),$Zll);
490         &mov    (&DWP(4,$inp),$Zhl);
491         &mov    (&DWP(8,$inp),$Zlh);
492         &mov    (&DWP(0,$inp),$Zhh);
493
494         &stack_pop(4+1);
495 &function_end("gcm_ghash_4bit_mmx");
496 \f
497 }} else {{      # "June" MMX version...
498                 # ... has slower "April" gcm_gmult_4bit_mmx with folded
499                 # loop. This is done to conserve code size...
500 $S=16;          # shift factor for rem_4bit
501
502 sub mmx_loop() {
503 # MMX version performs 2.8 times better on P4 (see comment in non-MMX
504 # routine for further details), 40% better on Opteron and Core2, 50%
505 # better on PIII... In other words effort is considered to be well
506 # spent...
507     my $inp = shift;
508     my $rem_4bit = shift;
509     my $cnt = $Zhh;
510     my $nhi = $Zhl;
511     my $nlo = $Zlh;
512     my $rem = $Zll;
513
514     my ($Zlo,$Zhi) = ("mm0","mm1");
515     my $tmp = "mm2";
516
517         &xor    ($nlo,$nlo);    # avoid partial register stalls on PIII
518         &mov    ($nhi,$Zll);
519         &mov    (&LB($nlo),&LB($nhi));
520         &mov    ($cnt,14);
521         &shl    (&LB($nlo),4);
522         &and    ($nhi,0xf0);
523         &movq   ($Zlo,&QWP(8,$Htbl,$nlo));
524         &movq   ($Zhi,&QWP(0,$Htbl,$nlo));
525         &movd   ($rem,$Zlo);
526         &jmp    (&label("mmx_loop"));
527
528     &set_label("mmx_loop",16);
529         &psrlq  ($Zlo,4);
530         &and    ($rem,0xf);
531         &movq   ($tmp,$Zhi);
532         &psrlq  ($Zhi,4);
533         &pxor   ($Zlo,&QWP(8,$Htbl,$nhi));
534         &mov    (&LB($nlo),&BP(0,$inp,$cnt));
535         &psllq  ($tmp,60);
536         &pxor   ($Zhi,&QWP(0,$rem_4bit,$rem,8));
537         &dec    ($cnt);
538         &movd   ($rem,$Zlo);
539         &pxor   ($Zhi,&QWP(0,$Htbl,$nhi));
540         &mov    ($nhi,$nlo);
541         &pxor   ($Zlo,$tmp);
542         &js     (&label("mmx_break"));
543
544         &shl    (&LB($nlo),4);
545         &and    ($rem,0xf);
546         &psrlq  ($Zlo,4);
547         &and    ($nhi,0xf0);
548         &movq   ($tmp,$Zhi);
549         &psrlq  ($Zhi,4);
550         &pxor   ($Zlo,&QWP(8,$Htbl,$nlo));
551         &psllq  ($tmp,60);
552         &pxor   ($Zhi,&QWP(0,$rem_4bit,$rem,8));
553         &movd   ($rem,$Zlo);
554         &pxor   ($Zhi,&QWP(0,$Htbl,$nlo));
555         &pxor   ($Zlo,$tmp);
556         &jmp    (&label("mmx_loop"));
557
558     &set_label("mmx_break",16);
559         &shl    (&LB($nlo),4);
560         &and    ($rem,0xf);
561         &psrlq  ($Zlo,4);
562         &and    ($nhi,0xf0);
563         &movq   ($tmp,$Zhi);
564         &psrlq  ($Zhi,4);
565         &pxor   ($Zlo,&QWP(8,$Htbl,$nlo));
566         &psllq  ($tmp,60);
567         &pxor   ($Zhi,&QWP(0,$rem_4bit,$rem,8));
568         &movd   ($rem,$Zlo);
569         &pxor   ($Zhi,&QWP(0,$Htbl,$nlo));
570         &pxor   ($Zlo,$tmp);
571
572         &psrlq  ($Zlo,4);
573         &and    ($rem,0xf);
574         &movq   ($tmp,$Zhi);
575         &psrlq  ($Zhi,4);
576         &pxor   ($Zlo,&QWP(8,$Htbl,$nhi));
577         &psllq  ($tmp,60);
578         &pxor   ($Zhi,&QWP(0,$rem_4bit,$rem,8));
579         &movd   ($rem,$Zlo);
580         &pxor   ($Zhi,&QWP(0,$Htbl,$nhi));
581         &pxor   ($Zlo,$tmp);
582
583         &psrlq  ($Zlo,32);      # lower part of Zlo is already there
584         &movd   ($Zhl,$Zhi);
585         &psrlq  ($Zhi,32);
586         &movd   ($Zlh,$Zlo);
587         &movd   ($Zhh,$Zhi);
588
589         &bswap  ($Zll);
590         &bswap  ($Zhl);
591         &bswap  ($Zlh);
592         &bswap  ($Zhh);
593 }
594
595 &function_begin("gcm_gmult_4bit_mmx");
596         &mov    ($inp,&wparam(0));      # load Xi
597         &mov    ($Htbl,&wparam(1));     # load Htable
598
599         &call   (&label("pic_point"));
600         &set_label("pic_point");
601         &blindpop("eax");
602         &lea    ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));
603
604         &movz   ($Zll,&BP(15,$inp));
605
606         &mmx_loop($inp,"eax");
607
608         &emms   ();
609         &mov    (&DWP(12,$inp),$Zll);
610         &mov    (&DWP(4,$inp),$Zhl);
611         &mov    (&DWP(8,$inp),$Zlh);
612         &mov    (&DWP(0,$inp),$Zhh);
613 &function_end("gcm_gmult_4bit_mmx");
614 \f
615 ######################################################################
616 # The subroutine below is the "528B" variant of the "4-bit" GCM GHASH
617 # function (see gcm128.c for details). It provides a further 20-40%
618 # performance improvement over the above-mentioned "May" version.
619
620 &static_label("rem_8bit");
621
622 &function_begin("gcm_ghash_4bit_mmx");
623 { my ($Zlo,$Zhi) = ("mm7","mm6");
624   my $rem_8bit = "esi";
625   my $Htbl = "ebx";
626
627     # parameter block
628     &mov        ("eax",&wparam(0));             # Xi
629     &mov        ("ebx",&wparam(1));             # Htable
630     &mov        ("ecx",&wparam(2));             # inp
631     &mov        ("edx",&wparam(3));             # len
632     &mov        ("ebp","esp");                  # original %esp
633     &call       (&label("pic_point"));
634     &set_label  ("pic_point");
635     &blindpop   ($rem_8bit);
636     &lea        ($rem_8bit,&DWP(&label("rem_8bit")."-".&label("pic_point"),$rem_8bit));
637
638     &sub        ("esp",512+16+16);              # allocate stack frame...
639     &and        ("esp",-64);                    # ...and align it
640     &sub        ("esp",16);                     # place for (u8)(H[]<<4)
641
642     &add        ("edx","ecx");                  # pointer to the end of input
643     &mov        (&DWP(528+16+0,"esp"),"eax");   # save Xi
644     &mov        (&DWP(528+16+8,"esp"),"edx");   # save inp+len
645     &mov        (&DWP(528+16+12,"esp"),"ebp");  # save original %esp
646
647     { my @lo  = ("mm0","mm1","mm2");
648       my @hi  = ("mm3","mm4","mm5");
649       my @tmp = ("mm6","mm7");
650       my ($off1,$off2,$i) = (0,0,);
651
652       &add      ($Htbl,128);                    # optimize for size
653       &lea      ("edi",&DWP(16+128,"esp"));
654       &lea      ("ebp",&DWP(16+256+128,"esp"));
655
656       # decompose Htable (low and high parts are kept separately),
657       # generate Htable[]>>4, (u8)(Htable[]<<4), save to stack...
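      # Resulting frame layout, as consumed by the inner loop below
      # (offsets relative to %esp; a summary of the stores that follow):
      #
      #     0..15      (u8)(H[i]<<4), one byte per nibble value
      #    16..143     H[i].lo,      16 qwords
      #   144..271     H[i].hi,      16 qwords
      #   272..399     (H[i]>>4).lo, 16 qwords
      #   400..527     (H[i]>>4).hi, 16 qwords
      #   528..        Xi^inp block and saved pointers (Xi, inp, end, %esp)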
658       for ($i=0;$i<18;$i++) {
659
660         &mov    ("edx",&DWP(16*$i+8-128,$Htbl))         if ($i<16);
661         &movq   ($lo[0],&QWP(16*$i+8-128,$Htbl))        if ($i<16);
662         &psllq  ($tmp[1],60)                            if ($i>1);
663         &movq   ($hi[0],&QWP(16*$i+0-128,$Htbl))        if ($i<16);
664         &por    ($lo[2],$tmp[1])                        if ($i>1);
665         &movq   (&QWP($off1-128,"edi"),$lo[1])          if ($i>0 && $i<17);
666         &psrlq  ($lo[1],4)                              if ($i>0 && $i<17);
667         &movq   (&QWP($off1,"edi"),$hi[1])              if ($i>0 && $i<17);
668         &movq   ($tmp[0],$hi[1])                        if ($i>0 && $i<17);
669         &movq   (&QWP($off2-128,"ebp"),$lo[2])          if ($i>1);
670         &psrlq  ($hi[1],4)                              if ($i>0 && $i<17);
671         &movq   (&QWP($off2,"ebp"),$hi[2])              if ($i>1);
672         &shl    ("edx",4)                               if ($i<16);
673         &mov    (&BP($i,"esp"),&LB("edx"))              if ($i<16);
674
675         unshift (@lo,pop(@lo));                 # "rotate" registers
676         unshift (@hi,pop(@hi));
677         unshift (@tmp,pop(@tmp));
678         $off1 += 8      if ($i>0);
679         $off2 += 8      if ($i>1);
680       }
681     }
682
683     &movq       ($Zhi,&QWP(0,"eax"));
684     &mov        ("ebx",&DWP(8,"eax"));
685     &mov        ("edx",&DWP(12,"eax"));         # load Xi
686
687 &set_label("outer",16);
688   { my $nlo = "eax";
689     my $dat = "edx";
690     my @nhi = ("edi","ebp");
691     my @rem = ("ebx","ecx");
692     my @red = ("mm0","mm1","mm2");
693     my $tmp = "mm3";
694
695     &xor        ($dat,&DWP(12,"ecx"));          # merge input data
696     &xor        ("ebx",&DWP(8,"ecx"));
697     &pxor       ($Zhi,&QWP(0,"ecx"));
698     &lea        ("ecx",&DWP(16,"ecx"));         # inp+=16
699     #&mov       (&DWP(528+12,"esp"),$dat);      # save inp^Xi
700     &mov        (&DWP(528+8,"esp"),"ebx");
701     &movq       (&QWP(528+0,"esp"),$Zhi);
702     &mov        (&DWP(528+16+4,"esp"),"ecx");   # save inp
703
704     &xor        ($nlo,$nlo);
705     &rol        ($dat,8);
706     &mov        (&LB($nlo),&LB($dat));
707     &mov        ($nhi[1],$nlo);
708     &and        (&LB($nlo),0x0f);
709     &shr        ($nhi[1],4);
710     &pxor       ($red[0],$red[0]);
711     &rol        ($dat,8);                       # next byte
712     &pxor       ($red[1],$red[1]);
713     &pxor       ($red[2],$red[2]);
714
715     # Just like in the "May" version, modulo-schedule the critical path in
716     # 'Z.hi ^= rem_8bit[Z.lo&0xff^((u8)H[nhi]<<4)]<<48'. The final 'pxor'
717     # is scheduled so late that rem_8bit[] has to be shifted *right*
718     # by 16, which is why the last argument to pinsrw is 2, which
719     # corresponds to <<32=<<48>>16...
720     for ($j=11,$i=0;$i<15;$i++) {
721
722       if ($i>0) {
723         &pxor   ($Zlo,&QWP(16,"esp",$nlo,8));           # Z^=H[nlo]
724         &rol    ($dat,8);                               # next byte
725         &pxor   ($Zhi,&QWP(16+128,"esp",$nlo,8));
726
727         &pxor   ($Zlo,$tmp);
728         &pxor   ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8));
729         &xor    (&LB($rem[1]),&BP(0,"esp",$nhi[0]));    # rem^(H[nhi]<<4)
730       } else {
731         &movq   ($Zlo,&QWP(16,"esp",$nlo,8));
732         &movq   ($Zhi,&QWP(16+128,"esp",$nlo,8));
733       }
734
735         &mov    (&LB($nlo),&LB($dat));
736         &mov    ($dat,&DWP(528+$j,"esp"))               if (--$j%4==0);
737
738         &movd   ($rem[0],$Zlo);
739         &movz   ($rem[1],&LB($rem[1]))                  if ($i>0);
740         &psrlq  ($Zlo,8);                               # Z>>=8
741
742         &movq   ($tmp,$Zhi);
743         &mov    ($nhi[0],$nlo);
744         &psrlq  ($Zhi,8);
745
746         &pxor   ($Zlo,&QWP(16+256+0,"esp",$nhi[1],8));  # Z^=H[nhi]>>4
747         &and    (&LB($nlo),0x0f);
748         &psllq  ($tmp,56);
749
750         &pxor   ($Zhi,$red[1])                          if ($i>1);
751         &shr    ($nhi[0],4);
752         &pinsrw ($red[0],&WP(0,$rem_8bit,$rem[1],2),2)  if ($i>0);
753
754         unshift (@red,pop(@red));                       # "rotate" registers
755         unshift (@rem,pop(@rem));
756         unshift (@nhi,pop(@nhi));
757     }
758
759     &pxor       ($Zlo,&QWP(16,"esp",$nlo,8));           # Z^=H[nlo]
760     &pxor       ($Zhi,&QWP(16+128,"esp",$nlo,8));
761     &xor        (&LB($rem[1]),&BP(0,"esp",$nhi[0]));    # rem^(H[nhi]<<4)
762
763     &pxor       ($Zlo,$tmp);
764     &pxor       ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8));
765     &movz       ($rem[1],&LB($rem[1]));
766
767     &pxor       ($red[2],$red[2]);                      # clear 2nd word
768     &psllq      ($red[1],4);
769
770     &movd       ($rem[0],$Zlo);
771     &psrlq      ($Zlo,4);                               # Z>>=4
772
773     &movq       ($tmp,$Zhi);
774     &psrlq      ($Zhi,4);
775     &shl        ($rem[0],4);                            # rem<<4
776
777     &pxor       ($Zlo,&QWP(16,"esp",$nhi[1],8));        # Z^=H[nhi]
778     &psllq      ($tmp,60);
779     &movz       ($rem[0],&LB($rem[0]));
780
781     &pxor       ($Zlo,$tmp);
782     &pxor       ($Zhi,&QWP(16+128,"esp",$nhi[1],8));
783
784     &pinsrw     ($red[0],&WP(0,$rem_8bit,$rem[1],2),2);
785     &pxor       ($Zhi,$red[1]);
786
787     &movd       ($dat,$Zlo);
788     &pinsrw     ($red[2],&WP(0,$rem_8bit,$rem[0],2),3); # last is <<48
789
790     &psllq      ($red[0],12);                           # correct by <<16>>4
791     &pxor       ($Zhi,$red[0]);
792     &psrlq      ($Zlo,32);
793     &pxor       ($Zhi,$red[2]);
794
795     &mov        ("ecx",&DWP(528+16+4,"esp"));   # restore inp
796     &movd       ("ebx",$Zlo);
797     &movq       ($tmp,$Zhi);                    # 01234567
798     &psllw      ($Zhi,8);                       # 1.3.5.7.
799     &psrlw      ($tmp,8);                       # .0.2.4.6
800     &por        ($Zhi,$tmp);                    # 10325476
801     &bswap      ($dat);
802     &pshufw     ($Zhi,$Zhi,0b00011011);         # 76543210
803     &bswap      ("ebx");
804     
805     &cmp        ("ecx",&DWP(528+16+8,"esp"));   # are we done?
806     &jne        (&label("outer"));
807   }
808
809     &mov        ("eax",&DWP(528+16+0,"esp"));   # restore Xi
810     &mov        (&DWP(12,"eax"),"edx");
811     &mov        (&DWP(8,"eax"),"ebx");
812     &movq       (&QWP(0,"eax"),$Zhi);
813
814     &mov        ("esp",&DWP(528+16+12,"esp"));  # restore original %esp
815     &emms       ();
816 }
817 &function_end("gcm_ghash_4bit_mmx");
818 }}
819 \f
820 if ($sse2) {{
821 ######################################################################
822 # PCLMULQDQ version.
823
824 $Xip="eax";
825 $Htbl="edx";
826 $const="ecx";
827 $inp="esi";
828 $len="ebx";
829
830 ($Xi,$Xhi)=("xmm0","xmm1");     $Hkey="xmm2";
831 ($T1,$T2,$T3)=("xmm3","xmm4","xmm5");
832 ($Xn,$Xhn)=("xmm6","xmm7");
833
834 &static_label("bswap");
835
836 sub clmul64x64_T2 {     # minimal "register" pressure
837 my ($Xhi,$Xi,$Hkey,$HK)=@_;
838
839         &movdqa         ($Xhi,$Xi);             #
840         &pshufd         ($T1,$Xi,0b01001110);
841         &pshufd         ($T2,$Hkey,0b01001110)  if (!defined($HK));
842         &pxor           ($T1,$Xi);              #
843         &pxor           ($T2,$Hkey)             if (!defined($HK));
844                         $HK=$T2                 if (!defined($HK));
845
846         &pclmulqdq      ($Xi,$Hkey,0x00);       #######
847         &pclmulqdq      ($Xhi,$Hkey,0x11);      #######
848         &pclmulqdq      ($T1,$HK,0x00);         #######
849         &xorps          ($T1,$Xi);              #
850         &xorps          ($T1,$Xhi);             #
851
852         &movdqa         ($T2,$T1);              #
853         &psrldq         ($T1,8);
854         &pslldq         ($T2,8);                #
855         &pxor           ($Xhi,$T1);
856         &pxor           ($Xi,$T2);              #
857 }
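
# For reference, the identity behind the three pclmulqdq in
# clmul64x64_T2 (and _T3 below) is plain Karatsuba over GF(2)[x]:
# with X = X1*2^64 + X0 and H = H1*2^64 + H0,
#
#       X*H = X1*H1*2^128
#           + ((X1^X0)*(H1^H0) ^ X1*H1 ^ X0*H0)*2^64
#           + X0*H0
#
# i.e. three 64x64 carry-less multiplications instead of four, at the
# cost of the pre-/post-processing XORs seen above.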
858
859 sub clmul64x64_T3 {
860 # Even though this subroutine offers visually better ILP, it
861 # was empirically found to be a tad slower than the version above,
862 # at least in the gcm_ghash_clmul context. But it's just as well,
863 # because loop modulo-scheduling is possible only thanks to the
864 # minimized "register" pressure...
865 my ($Xhi,$Xi,$Hkey)=@_;
866
867         &movdqa         ($T1,$Xi);              #
868         &movdqa         ($Xhi,$Xi);
869         &pclmulqdq      ($Xi,$Hkey,0x00);       #######
870         &pclmulqdq      ($Xhi,$Hkey,0x11);      #######
871         &pshufd         ($T2,$T1,0b01001110);   #
872         &pshufd         ($T3,$Hkey,0b01001110);
873         &pxor           ($T2,$T1);              #
874         &pxor           ($T3,$Hkey);
875         &pclmulqdq      ($T2,$T3,0x00);         #######
876         &pxor           ($T2,$Xi);              #
877         &pxor           ($T2,$Xhi);             #
878
879         &movdqa         ($T3,$T2);              #
880         &psrldq         ($T2,8);
881         &pslldq         ($T3,8);                #
882         &pxor           ($Xhi,$T2);
883         &pxor           ($Xi,$T3);              #
884 }
885 \f
886 if (1) {                # Algorithm 9 with <<1 twist.
887                         # Reduction is shorter and uses only two
888                         # temporary registers, which makes it better
889                         # candidate for interleaving with 64x64
890                         # multiplication. Pre-modulo-scheduled loop
891                         # was found to be ~20% faster than Algorithm 5
892                         # below. Algorithm 9 was therefore chosen for
893                         # further optimization...
894
895 sub reduction_alg9 {    # 17/11 times faster than Intel version
896 my ($Xhi,$Xi) = @_;
897
898         # 1st phase
899         &movdqa         ($T2,$Xi);              #
900         &movdqa         ($T1,$Xi);
901         &psllq          ($Xi,5);
902         &pxor           ($T1,$Xi);              #
903         &psllq          ($Xi,1);
904         &pxor           ($Xi,$T1);              #
905         &psllq          ($Xi,57);               #
906         &movdqa         ($T1,$Xi);              #
907         &pslldq         ($Xi,8);
908         &psrldq         ($T1,8);                #       
909         &pxor           ($Xi,$T2);
910         &pxor           ($Xhi,$T1);             #
911
912         # 2nd phase
913         &movdqa         ($T2,$Xi);
914         &psrlq          ($Xi,1);
915         &pxor           ($Xhi,$T2);             #
916         &pxor           ($T2,$Xi);
917         &psrlq          ($Xi,5);
918         &pxor           ($Xi,$T2);              #
919         &psrlq          ($Xi,1);                #
920         &pxor           ($Xi,$Xhi)              #
921 }
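
# A note on the shift counts in reduction_alg9 (an informal reading; see
# the Intel white paper for the formal derivation): the 1st phase
# multiplies each 64-bit half by x^57+x^62+x^63, i.e. shifts by 64-7,
# 64-2 and 64-1, and the 2nd phase folds the result back with shifts by
# 1, 2 and 7, matching the x, x^2 and x^7 terms of the GHASH reduction
# polynomial x^128+x^7+x^2+x+1.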
922
923 &function_begin_B("gcm_init_clmul");
924         &mov            ($Htbl,&wparam(0));
925         &mov            ($Xip,&wparam(1));
926
927         &call           (&label("pic"));
928 &set_label("pic");
929         &blindpop       ($const);
930         &lea            ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
931
932         &movdqu         ($Hkey,&QWP(0,$Xip));
933         &pshufd         ($Hkey,$Hkey,0b01001110);# dword swap
934
935         # <<1 twist
936         &pshufd         ($T2,$Hkey,0b11111111); # broadcast uppermost dword
937         &movdqa         ($T1,$Hkey);
938         &psllq          ($Hkey,1);
939         &pxor           ($T3,$T3);              #
940         &psrlq          ($T1,63);
941         &pcmpgtd        ($T3,$T2);              # broadcast carry bit
942         &pslldq         ($T1,8);
943         &por            ($Hkey,$T1);            # H<<=1
944
945         # magic reduction
946         &pand           ($T3,&QWP(16,$const));  # 0x1c2_polynomial
947         &pxor           ($Hkey,$T3);            # if(carry) H^=0x1c2_polynomial
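        # (Note on the <<1 twist: H is kept pre-multiplied by x, i.e.
        # shifted left by one bit, with a conditional reduction by the
        # 0x1c2 constant when the top bit of H was set. This absorbs,
        # once at key setup, the one-bit adjustment that a carry-less
        # product of bit-reflected operands would otherwise need on
        # every multiplication. An explanatory reading of the code above.)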
948
949         # calculate H^2
950         &movdqa         ($Xi,$Hkey);
951         &clmul64x64_T2  ($Xhi,$Xi,$Hkey);
952         &reduction_alg9 ($Xhi,$Xi);
953
954         &pshufd         ($T1,$Hkey,0b01001110);
955         &pshufd         ($T2,$Xi,0b01001110);
956         &pxor           ($T1,$Hkey);            # Karatsuba pre-processing
957         &movdqu         (&QWP(0,$Htbl),$Hkey);  # save H
958         &pxor           ($T2,$Xi);              # Karatsuba pre-processing
959         &movdqu         (&QWP(16,$Htbl),$Xi);   # save H^2
960         &palignr        ($T2,$T1,8);            # low part is H.lo^H.hi
961         &movdqu         (&QWP(32,$Htbl),$T2);   # save Karatsuba "salt"
962
963         &ret            ();
964 &function_end_B("gcm_init_clmul");
965
966 &function_begin_B("gcm_gmult_clmul");
967         &mov            ($Xip,&wparam(0));
968         &mov            ($Htbl,&wparam(1));
969
970         &call           (&label("pic"));
971 &set_label("pic");
972         &blindpop       ($const);
973         &lea            ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
974
975         &movdqu         ($Xi,&QWP(0,$Xip));
976         &movdqa         ($T3,&QWP(0,$const));
977         &movups         ($Hkey,&QWP(0,$Htbl));
978         &pshufb         ($Xi,$T3);
979         &movups         ($T2,&QWP(32,$Htbl));
980
981         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$T2);
982         &reduction_alg9 ($Xhi,$Xi);
983
984         &pshufb         ($Xi,$T3);
985         &movdqu         (&QWP(0,$Xip),$Xi);
986
987         &ret    ();
988 &function_end_B("gcm_gmult_clmul");
989
990 &function_begin("gcm_ghash_clmul");
991         &mov            ($Xip,&wparam(0));
992         &mov            ($Htbl,&wparam(1));
993         &mov            ($inp,&wparam(2));
994         &mov            ($len,&wparam(3));
995
996         &call           (&label("pic"));
997 &set_label("pic");
998         &blindpop       ($const);
999         &lea            ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
1000
1001         &movdqu         ($Xi,&QWP(0,$Xip));
1002         &movdqa         ($T3,&QWP(0,$const));
1003         &movdqu         ($Hkey,&QWP(0,$Htbl));
1004         &pshufb         ($Xi,$T3);
1005
1006         &sub            ($len,0x10);
1007         &jz             (&label("odd_tail"));
1008
1009         #######
1010         # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
1011         #       [(H*Ii+1) + (H*Xi+1)] mod P =
1012         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
1013         #
1014         &movdqu         ($T1,&QWP(0,$inp));     # Ii
1015         &movdqu         ($Xn,&QWP(16,$inp));    # Ii+1
1016         &pshufb         ($T1,$T3);
1017         &pshufb         ($Xn,$T3);
1018         &movdqu         ($T3,&QWP(32,$Htbl));
1019         &pxor           ($Xi,$T1);              # Ii+Xi
1020
1021         &pshufd         ($T1,$Xn,0b01001110);   # H*Ii+1
1022         &movdqa         ($Xhn,$Xn);
1023         &pxor           ($T1,$Xn);              #
1024
1025         &pclmulqdq      ($Xn,$Hkey,0x00);       #######
1026         &pclmulqdq      ($Xhn,$Hkey,0x11);      #######
1027         &movups         ($Hkey,&QWP(16,$Htbl)); # load H^2
1028         &pclmulqdq      ($T1,$T3,0x00);         #######
1029
1030         &lea            ($inp,&DWP(32,$inp));   # i+=2
1031         &sub            ($len,0x20);
1032         &jbe            (&label("even_tail"));
1033         &jmp            (&label("mod_loop"));
1034
1035 &set_label("mod_loop",32);
1036         &pshufd         ($T2,$Xi,0b01001110);   # H^2*(Ii+Xi)
1037         &movdqa         ($Xhi,$Xi);
1038         &pxor           ($T2,$Xi);              #
1039
1040         &pclmulqdq      ($Xi,$Hkey,0x00);       #######
1041         &pclmulqdq      ($Xhi,$Hkey,0x11);      #######
1042         &movups         ($Hkey,&QWP(0,$Htbl));  # load H
1043         &pclmulqdq      ($T2,$T3,0x10);         #######
1044         &movdqa         ($T3,&QWP(0,$const));
1045
1046         &xorps          ($Xi,$Xn);              # (H*Ii+1) + H^2*(Ii+Xi)
1047         &xorps          ($Xhi,$Xhn);
1048          &movdqu        ($Xhn,&QWP(0,$inp));    # Ii
1049         &pxor           ($T1,$Xi);              # aggregated Karatsuba post-processing
1050          &movdqu        ($Xn,&QWP(16,$inp));    # Ii+1
1051         &pxor           ($T1,$Xhi);             #
1052
1053         &pxor           ($T2,$T1);              #
1054          &pshufb        ($Xhn,$T3);
1055
1056         &movdqa         ($T1,$T2);              #
1057         &psrldq         ($T2,8);
1058         &pslldq         ($T1,8);                #
1059         &pxor           ($Xhi,$T2);
1060         &pxor           ($Xi,$T1);              #
1061          &pshufb        ($Xn,$T3);
1062          &pxor          ($Xhi,$Xhn);            # "Ii+Xi", consume early
1063
1064         &movdqa         ($Xhn,$Xn);             #&clmul64x64_TX ($Xhn,$Xn,$Hkey); H*Ii+1
1065           &movdqa       ($T2,$Xi);              #&reduction_alg9($Xhi,$Xi); 1st phase
1066           &movdqa       ($T1,$Xi);
1067           &psllq        ($Xi,5);
1068           &pxor         ($T1,$Xi);              #
1069           &psllq        ($Xi,1);
1070           &pxor         ($Xi,$T1);              #
1071         &movups         ($T3,&QWP(32,$Htbl));
1072         &pclmulqdq      ($Xn,$Hkey,0x00);       #######
1073           &psllq        ($Xi,57);               #
1074           &movdqa       ($T1,$Xi);              #
1075           &pslldq       ($Xi,8);
1076           &psrldq       ($T1,8);                #       
1077           &pxor         ($Xi,$T2);
1078           &pxor         ($Xhi,$T1);             #
1079         &pshufd         ($T1,$Xhn,0b01001110);
1080           &movdqa       ($T2,$Xi);              # 2nd phase
1081           &psrlq        ($Xi,1);
1082         &pxor           ($T1,$Xhn);
1083         &pclmulqdq      ($Xhn,$Hkey,0x11);      #######
1084         &movups         ($Hkey,&QWP(16,$Htbl)); # load H^2
1085           &pxor         ($Xhi,$T2);             #
1086           &pxor         ($T2,$Xi);
1087           &psrlq        ($Xi,5);
1088           &pxor         ($Xi,$T2);              #
1089           &psrlq        ($Xi,1);                #
1090           &pxor         ($Xi,$Xhi)              #
1091         &pclmulqdq      ($T1,$T3,0x00);         #######
1092
1093         &lea            ($inp,&DWP(32,$inp));
1094         &sub            ($len,0x20);
1095         &ja             (&label("mod_loop"));
1096
1097 &set_label("even_tail");
1098         &pshufd         ($T2,$Xi,0b01001110);   # H^2*(Ii+Xi)
1099         &movdqa         ($Xhi,$Xi);
1100         &pxor           ($T2,$Xi);              #
1101
1102         &pclmulqdq      ($Xi,$Hkey,0x00);       #######
1103         &pclmulqdq      ($Xhi,$Hkey,0x11);      #######
1104         &pclmulqdq      ($T2,$T3,0x10);         #######
1105         &movdqa         ($T3,&QWP(0,$const));
1106
1107         &xorps          ($Xi,$Xn);              # (H*Ii+1) + H^2*(Ii+Xi)
1108         &xorps          ($Xhi,$Xhn);
1109         &pxor           ($T1,$Xi);              # aggregated Karatsuba post-processing
1110         &pxor           ($T1,$Xhi);             #
1111
1112         &pxor           ($T2,$T1);              #
1113
1114         &movdqa         ($T1,$T2);              #
1115         &psrldq         ($T2,8);
1116         &pslldq         ($T1,8);                #
1117         &pxor           ($Xhi,$T2);
1118         &pxor           ($Xi,$T1);              #
1119
1120         &reduction_alg9 ($Xhi,$Xi);
1121
1122         &test           ($len,$len);
1123         &jnz            (&label("done"));
1124
1125         &movups         ($Hkey,&QWP(0,$Htbl));  # load H
1126 &set_label("odd_tail");
1127         &movdqu         ($T1,&QWP(0,$inp));     # Ii
1128         &pshufb         ($T1,$T3);
1129         &pxor           ($Xi,$T1);              # Ii+Xi
1130
1131         &clmul64x64_T2  ($Xhi,$Xi,$Hkey);       # H*(Ii+Xi)
1132         &reduction_alg9 ($Xhi,$Xi);
1133
1134 &set_label("done");
1135         &pshufb         ($Xi,$T3);
1136         &movdqu         (&QWP(0,$Xip),$Xi);
1137 &function_end("gcm_ghash_clmul");
1138 \f
1139 } else {                # Algorithm 5. Kept for reference purposes.
1140
1141 sub reduction_alg5 {    # 19/16 times faster than Intel version
1142 my ($Xhi,$Xi)=@_;
1143
1144         # <<1
1145         &movdqa         ($T1,$Xi);              #
1146         &movdqa         ($T2,$Xhi);
1147         &pslld          ($Xi,1);
1148         &pslld          ($Xhi,1);               #
1149         &psrld          ($T1,31);
1150         &psrld          ($T2,31);               #
1151         &movdqa         ($T3,$T1);
1152         &pslldq         ($T1,4);
1153         &psrldq         ($T3,12);               #
1154         &pslldq         ($T2,4);
1155         &por            ($Xhi,$T3);             #
1156         &por            ($Xi,$T1);
1157         &por            ($Xhi,$T2);             #
1158
1159         # 1st phase
1160         &movdqa         ($T1,$Xi);
1161         &movdqa         ($T2,$Xi);
1162         &movdqa         ($T3,$Xi);              #
1163         &pslld          ($T1,31);
1164         &pslld          ($T2,30);
1165         &pslld          ($Xi,25);               #
1166         &pxor           ($T1,$T2);
1167         &pxor           ($T1,$Xi);              #
1168         &movdqa         ($T2,$T1);              #
1169         &pslldq         ($T1,12);
1170         &psrldq         ($T2,4);                #
1171         &pxor           ($T3,$T1);
1172
1173         # 2nd phase
1174         &pxor           ($Xhi,$T3);             #
1175         &movdqa         ($Xi,$T3);
1176         &movdqa         ($T1,$T3);
1177         &psrld          ($Xi,1);                #
1178         &psrld          ($T1,2);
1179         &psrld          ($T3,7);                #
1180         &pxor           ($Xi,$T1);
1181         &pxor           ($Xhi,$T2);
1182         &pxor           ($Xi,$T3);              #
1183         &pxor           ($Xi,$Xhi);             #
1184 }
1185
1186 &function_begin_B("gcm_init_clmul");
1187         &mov            ($Htbl,&wparam(0));
1188         &mov            ($Xip,&wparam(1));
1189
1190         &call           (&label("pic"));
1191 &set_label("pic");
1192         &blindpop       ($const);
1193         &lea            ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
1194
1195         &movdqu         ($Hkey,&QWP(0,$Xip));
1196         &pshufd         ($Hkey,$Hkey,0b01001110);# dword swap
1197
1198         # calculate H^2
1199         &movdqa         ($Xi,$Hkey);
1200         &clmul64x64_T3  ($Xhi,$Xi,$Hkey);
1201         &reduction_alg5 ($Xhi,$Xi);
1202
1203         &movdqu         (&QWP(0,$Htbl),$Hkey);  # save H
1204         &movdqu         (&QWP(16,$Htbl),$Xi);   # save H^2
1205
1206         &ret            ();
1207 &function_end_B("gcm_init_clmul");
1208
1209 &function_begin_B("gcm_gmult_clmul");
1210         &mov            ($Xip,&wparam(0));
1211         &mov            ($Htbl,&wparam(1));
1212
1213         &call           (&label("pic"));
1214 &set_label("pic");
1215         &blindpop       ($const);
1216         &lea            ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
1217
1218         &movdqu         ($Xi,&QWP(0,$Xip));
1219         &movdqa         ($Xn,&QWP(0,$const));
1220         &movdqu         ($Hkey,&QWP(0,$Htbl));
1221         &pshufb         ($Xi,$Xn);
1222
1223         &clmul64x64_T3  ($Xhi,$Xi,$Hkey);
1224         &reduction_alg5 ($Xhi,$Xi);
1225
1226         &pshufb         ($Xi,$Xn);
1227         &movdqu         (&QWP(0,$Xip),$Xi);
1228
1229         &ret    ();
1230 &function_end_B("gcm_gmult_clmul");
1231
1232 &function_begin("gcm_ghash_clmul");
1233         &mov            ($Xip,&wparam(0));
1234         &mov            ($Htbl,&wparam(1));
1235         &mov            ($inp,&wparam(2));
1236         &mov            ($len,&wparam(3));
1237
1238         &call           (&label("pic"));
1239 &set_label("pic");
1240         &blindpop       ($const);
1241         &lea            ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
1242
1243         &movdqu         ($Xi,&QWP(0,$Xip));
1244         &movdqa         ($T3,&QWP(0,$const));
1245         &movdqu         ($Hkey,&QWP(0,$Htbl));
1246         &pshufb         ($Xi,$T3);
1247
1248         &sub            ($len,0x10);
1249         &jz             (&label("odd_tail"));
1250
1251         #######
1252         # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
1253         #       [(H*Ii+1) + (H*Xi+1)] mod P =
1254         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
1255         #
1256         &movdqu         ($T1,&QWP(0,$inp));     # Ii
1257         &movdqu         ($Xn,&QWP(16,$inp));    # Ii+1
1258         &pshufb         ($T1,$T3);
1259         &pshufb         ($Xn,$T3);
1260         &pxor           ($Xi,$T1);              # Ii+Xi
1261
1262         &clmul64x64_T3  ($Xhn,$Xn,$Hkey);       # H*Ii+1
1263         &movdqu         ($Hkey,&QWP(16,$Htbl)); # load H^2
1264
1265         &sub            ($len,0x20);
1266         &lea            ($inp,&DWP(32,$inp));   # i+=2
1267         &jbe            (&label("even_tail"));
1268
1269 &set_label("mod_loop");
1270         &clmul64x64_T3  ($Xhi,$Xi,$Hkey);       # H^2*(Ii+Xi)
1271         &movdqu         ($Hkey,&QWP(0,$Htbl));  # load H
1272
1273         &pxor           ($Xi,$Xn);              # (H*Ii+1) + H^2*(Ii+Xi)
1274         &pxor           ($Xhi,$Xhn);
1275
1276         &reduction_alg5 ($Xhi,$Xi);
1277
1278         #######
1279         &movdqa         ($T3,&QWP(0,$const));
1280         &movdqu         ($T1,&QWP(0,$inp));     # Ii
1281         &movdqu         ($Xn,&QWP(16,$inp));    # Ii+1
1282         &pshufb         ($T1,$T3);
1283         &pshufb         ($Xn,$T3);
1284         &pxor           ($Xi,$T1);              # Ii+Xi
1285
1286         &clmul64x64_T3  ($Xhn,$Xn,$Hkey);       # H*Ii+1
1287         &movdqu         ($Hkey,&QWP(16,$Htbl)); # load H^2
1288
1289         &sub            ($len,0x20);
1290         &lea            ($inp,&DWP(32,$inp));
1291         &ja             (&label("mod_loop"));
1292
1293 &set_label("even_tail");
1294         &clmul64x64_T3  ($Xhi,$Xi,$Hkey);       # H^2*(Ii+Xi)
1295
1296         &pxor           ($Xi,$Xn);              # (H*Ii+1) + H^2*(Ii+Xi)
1297         &pxor           ($Xhi,$Xhn);
1298
1299         &reduction_alg5 ($Xhi,$Xi);
1300
1301         &movdqa         ($T3,&QWP(0,$const));
1302         &test           ($len,$len);
1303         &jnz            (&label("done"));
1304
1305         &movdqu         ($Hkey,&QWP(0,$Htbl));  # load H
1306 &set_label("odd_tail");
1307         &movdqu         ($T1,&QWP(0,$inp));     # Ii
1308         &pshufb         ($T1,$T3);
1309         &pxor           ($Xi,$T1);              # Ii+Xi
1310
1311         &clmul64x64_T3  ($Xhi,$Xi,$Hkey);       # H*(Ii+Xi)
1312         &reduction_alg5 ($Xhi,$Xi);
1313
1314         &movdqa         ($T3,&QWP(0,$const));
1315 &set_label("done");
1316         &pshufb         ($Xi,$T3);
1317         &movdqu         (&QWP(0,$Xip),$Xi);
1318 &function_end("gcm_ghash_clmul");
1319
1320 }
1321 \f
1322 &set_label("bswap",64);
1323         &data_byte(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
1324         &data_byte(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2); # 0x1c2_polynomial
1325 &set_label("rem_8bit",64);
1326         &data_short(0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E);
1327         &data_short(0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E);
1328         &data_short(0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E);
1329         &data_short(0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E);
1330         &data_short(0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E);
1331         &data_short(0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E);
1332         &data_short(0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E);
1333         &data_short(0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E);
1334         &data_short(0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE);
1335         &data_short(0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE);
1336         &data_short(0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE);
1337         &data_short(0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE);
1338         &data_short(0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E);
1339         &data_short(0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E);
1340         &data_short(0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE);
1341         &data_short(0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE);
1342         &data_short(0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E);
1343         &data_short(0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E);
1344         &data_short(0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E);
1345         &data_short(0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E);
1346         &data_short(0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E);
1347         &data_short(0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E);
1348         &data_short(0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E);
1349         &data_short(0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E);
1350         &data_short(0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE);
1351         &data_short(0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE);
1352         &data_short(0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE);
1353         &data_short(0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE);
1354         &data_short(0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E);
1355         &data_short(0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E);
1356         &data_short(0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE);
1357         &data_short(0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE);
1358 }}      # $sse2
1359
1360 &set_label("rem_4bit",64);
1361         &data_word(0,0x0000<<$S,0,0x1C20<<$S,0,0x3840<<$S,0,0x2460<<$S);
1362         &data_word(0,0x7080<<$S,0,0x6CA0<<$S,0,0x48C0<<$S,0,0x54E0<<$S);
1363         &data_word(0,0xE100<<$S,0,0xFD20<<$S,0,0xD940<<$S,0,0xC560<<$S);
1364         &data_word(0,0x9180<<$S,0,0x8DA0<<$S,0,0xA9C0<<$S,0,0xB5E0<<$S);
1365 }}}     # !$x86only
1366
1367 &asciz("GHASH for x86, CRYPTOGAMS by <appro\@openssl.org>");
1368 &asm_finish();
1369
1370 # A question was raised about the choice of vanilla MMX. Or rather, why
1371 # wasn't SSE2 chosen instead? In addition to the fact that MMX runs on
1372 # legacy CPUs such as PIII, the "4-bit" MMX version was observed to provide
1373 # better performance than the *corresponding* SSE2 one even on contemporary
1374 # CPUs. The SSE2 results were provided by Peter-Michael Hager. He maintains
1375 # an SSE2 implementation featuring a full range of lookup-table sizes, but
1376 # with per-invocation lookup table setup. The latter means that the table
1377 # size is chosen depending on how much data is to be hashed in each call:
1378 # more data, larger table. The best reported result for Core2 is ~4 cycles
1379 # per processed byte out of a 64KB block. This number even accounts for the
1380 # 64KB table setup overhead. As discussed in gcm128.c, we choose to be
1381 # more conservative in respect to lookup table sizes, but how do the
1382 # results compare? The minimalistic "256B" MMX version delivers ~11 cycles
1383 # on the same platform. As also discussed in gcm128.c, the next in line
1384 # "8-bit Shoup's" or "4KB" method should deliver twice the performance of
1385 # the "256B" one, in other words no worse than ~6 cycles per byte. It
1386 # should also be noted that in the SSE2 case the improvement can be "super-
1387 # linear," i.e. more than twice, mostly because >>8 maps to a single
1388 # instruction on an SSE2 register. This is unlike the "4-bit" case, where >>4
1389 # maps to the same number of instructions in both MMX and SSE2 cases.
1390 # The bottom line is that a switch to SSE2 is considered justifiable
1391 # only if we choose to implement the "8-bit" method...