# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+64/128 bytes of fixed
# table]. It has two code paths: vanilla x86 and vanilla MMX. The
# former will be executed on 486 and Pentium, the latter on all
# others. Performance results are for the streamed GHASH subroutine
# and are expressed in cycles per processed byte, less is better:
#		gcc 2.95.3(*)	MMX assembler	x86 assembler
#
# Pentium	100/112(**)	-		50
# P4		96 /122		30		84(***)
# Opteron	50 /71		21		30
# Core2		54 /68		12.5		18
# (*)	gcc 3.4.x was observed to generate a few percent slower code,
#	which is one of the reasons why 2.95.3 results were chosen;
#	another reason is the lack of 3.4.x results for older CPUs;
# (**)	second number is the result for code compiled with the -fPIC
#	flag, which is actually more relevant, because assembler code
#	is position-independent;
# (***)	see comment in non-MMX routine for further details;
# To summarize, it's >2-3 times faster than gcc-generated code. To
# anchor it to something else, SHA1 assembler processes one byte in
# 11-13 cycles on contemporary x86 cores.
# Add PCLMULQDQ version performing at 2.13 cycles per processed byte.
# The question is how close is it to the theoretical limit? The
# pclmulqdq instruction latency appears to be 14 cycles and there
# can't be more than 2 of them executing at any given time. This
# means that a single Karatsuba multiplication would take 28 cycles
# *plus* a few cycles for pre- and post-processing. The multiplication
# then has to be followed by modulo-reduction. Given that the
# aggregated reduction method [see "Carry-less Multiplication and Its
# Usage for Computing the GCM Mode" white paper by Intel] allows you
# to perform reduction only once in a while, we can assume that the
# asymptotic performance can be estimated as (28+Tmod/Naggr)/16,
# where Tmod is the time to perform reduction and Naggr is the
# aggregation factor.
# Before we proceed to this implementation, let's have a closer look
# at the best-performing code suggested by Intel in their white paper.
# By tracing inter-register dependencies, Tmod is estimated as ~19
# cycles and Naggr is 4, resulting in 2.05 cycles per processed byte.
# As implied, this is quite an optimistic estimate, because it does
# not account for Karatsuba pre- and post-processing, which for a
# single multiplication is ~5 cycles. Unfortunately Intel does not
# provide performance data for GHASH alone, only for the fused GCM
# mode. But we can estimate it by subtracting the CTR performance
# result provided in the "AES Instruction Set" white paper:
# 3.54-1.38=2.16 cycles per processed byte, or 5% off the estimate.
# It should be noted though that 3.54 is the GCM result for 16KB
# block size, while 1.38 is CTR for 1KB block size, meaning that the
# real number is likely to be a bit further from the estimate.
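#
# [For the record, that 2.05 figure is simply the formula above
# evaluated with Intel's parameters: (28+19/4)/16 = 32.75/16 = ~2.05
# cycles per processed byte.]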
# Moving on to the implementation in question. Tmod is estimated as
# ~13 cycles and Naggr is 2, giving an asymptotic performance of ...
# 2.16. How is it possible that the measured performance is better
# than the optimistic theoretical estimate? There is one thing Intel
# failed to recognize. By fusing GHASH with CTR, the former's
# performance is really limited by the above (Tmul + Tmod/Naggr)
# equation. But if the GHASH procedure is detached, the
# modulo-reduction can be interleaved with Naggr-1 multiplications
# and under ideal conditions even disappear from the equation. So the
# optimistic theoretical estimate for this implementation is ...
# 28/16=1.75, and not 2.16. Well, that's probably way too optimistic,
# at least for such a small Naggr. I'd argue that (28+Tproc/Naggr)/16,
# where Tproc is the time required for Karatsuba pre- and
# post-processing, is a more realistic estimate. In this case it
# gives ... 1.91 cycles per processed byte. In other words, depending
# on how well we can interleave reduction with one of the two
# multiplications, the performance should be between 1.91 and 2.16.
# As already mentioned, this implementation processes one byte [out
# of 1KB buffer] in 2.13 cycles, while the x86_64 counterpart does so
# in 2.07. x86_64 performance is better, because the larger register
# bank allows better interleaving of reduction and multiplication.
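#
# [The same formula backs the figures above: with Tmod=13 and Naggr=2,
# (28+13/2)/16 = 34.5/16 = ~2.16; substituting Tproc=5 for Tmod gives
# (28+5/2)/16 = 30.5/16 = ~1.91 cycles per processed byte.]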
# Does it make sense to increase Naggr? To start with, it's virtually
# impossible in 32-bit mode, because of the limited register bank
# capacity. Otherwise the improvement has to be weighed against
# slower setup, as well as increased code size and complexity. As
# even the optimistic estimate doesn't promise a 30% performance
# improvement, there are currently no plans to increase Naggr.
# Special thanks to David Woodhouse <dwmw2@infradead.org> for
# providing access to a Westmere-based system on behalf of Intel
# Open Source Technology Centre.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"ghash-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");

for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
($Zhh,$Zhl,$Zlh,$Zll) = ("ebp","edx","ecx","ebx");
$inp  = "edi";
$Htbl = "esi";

$unroll = 0;	# Affects x86 loop. Folded loop performs ~7% worse
		# than unrolled, which has to be weighed against
		# 2.5x x86-specific code size reduction.
&mov ($Zhh,&DWP(4,$Htbl,$Zll));
&mov ($Zhl,&DWP(0,$Htbl,$Zll));
&mov ($Zlh,&DWP(12,$Htbl,$Zll));
&mov ($Zll,&DWP(8,$Htbl,$Zll));
&xor ($rem,$rem); # avoid partial register stalls on PIII
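# [This is the standard "4-bit" GHASH recipe: Z is seeded with the
# Htable entry selected by one nibble of Xi; each step then shifts Z
# right by 4 bits, folds the shifted-out nibble back in via the
# rem_4bit table and xors in the Htable entry for the next nibble.]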
# shrd practically kills P4, 2.5x deterioration, but P4 has
# MMX code-path to execute. shrd runs a tad faster [than twice
# the shifts, moves and ors] on pre-MMX Pentium (as well as
# PIII and Core2), *but* minimizes code size, spares a register
# and thus allows the loop to be folded...
&jmp (&label("x86_loop"));
&set_label("x86_loop",16);
for($i=1;$i<=2;$i++) {
&mov (&LB($rem),&LB($Zll));
&and (&LB($rem),0xf);
&xor ($Zhh,&DWP($off+16,"esp",$rem,4));
&mov (&LB($rem),&BP($off,"esp",$cnt));
&and (&LB($rem),0xf0);
&xor ($Zll,&DWP(8,$Htbl,$rem));
&xor ($Zlh,&DWP(12,$Htbl,$rem));
&xor ($Zhl,&DWP(0,$Htbl,$rem));
&xor ($Zhh,&DWP(4,$Htbl,$rem));
&js (&label("x86_break"));
&jmp (&label("x86_loop"));

&set_label("x86_break",16);
for($i=1;$i<32;$i++) {
&mov (&LB($rem),&LB($Zll));
&and (&LB($rem),0xf);
&xor ($Zhh,&DWP($off+16,"esp",$rem,4));
&mov (&LB($rem),&BP($off+15-($i>>1),"esp"));
&and (&LB($rem),0xf0);
&mov (&LB($rem),&BP($off+15-($i>>1),"esp"));
&xor ($Zll,&DWP(8,$Htbl,$rem));
&xor ($Zlh,&DWP(12,$Htbl,$rem));
&xor ($Zhl,&DWP(0,$Htbl,$rem));
&xor ($Zhh,&DWP(4,$Htbl,$rem));

&function_begin_B("_x86_gmult_4bit_inner");
&function_end_B("_x86_gmult_4bit_inner");
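# [The constants below form the "rem_4bit" table: entry i holds the
# reduction that must be xored into the top of Z when the four bits
# i are shifted out of its low end. Each value is a GF(2) combination
# of the bit-reflected GCM polynomial byte 0xE1, i.e. of 0xE100>>3,
# >>2, >>1 and >>0 for bits 0 through 3 of i, kept in the upper 16
# bits of each slot.]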
sub deposit_rem_4bit {
my $bias = shift;

&mov (&DWP($bias+0, "esp"),0x0000<<16);
&mov (&DWP($bias+4, "esp"),0x1C20<<16);
&mov (&DWP($bias+8, "esp"),0x3840<<16);
&mov (&DWP($bias+12,"esp"),0x2460<<16);
&mov (&DWP($bias+16,"esp"),0x7080<<16);
&mov (&DWP($bias+20,"esp"),0x6CA0<<16);
&mov (&DWP($bias+24,"esp"),0x48C0<<16);
&mov (&DWP($bias+28,"esp"),0x54E0<<16);
&mov (&DWP($bias+32,"esp"),0xE100<<16);
&mov (&DWP($bias+36,"esp"),0xFD20<<16);
&mov (&DWP($bias+40,"esp"),0xD940<<16);
&mov (&DWP($bias+44,"esp"),0xC560<<16);
&mov (&DWP($bias+48,"esp"),0x9180<<16);
&mov (&DWP($bias+52,"esp"),0x8DA0<<16);
&mov (&DWP($bias+56,"esp"),0xA9C0<<16);
&mov (&DWP($bias+60,"esp"),0xB5E0<<16);
$suffix = $x86only ? "" : "_x86";

&function_begin("gcm_gmult_4bit".$suffix);
&stack_push(16+4+1); # +1 for stack alignment
&mov ($inp,&wparam(0)); # load Xi
&mov ($Htbl,&wparam(1)); # load Htable

&mov ($Zhh,&DWP(0,$inp)); # load Xi[16]
&mov ($Zhl,&DWP(4,$inp));
&mov ($Zlh,&DWP(8,$inp));
&mov ($Zll,&DWP(12,$inp));

&deposit_rem_4bit(16);

&mov (&DWP(0,"esp"),$Zhh); # copy Xi[16] on stack
&mov (&DWP(4,"esp"),$Zhl);
&mov (&DWP(8,"esp"),$Zlh);
&mov (&DWP(12,"esp"),$Zll);

&call ("_x86_gmult_4bit_inner");

&mov ($inp,&wparam(0));
&mov (&DWP(12,$inp),$Zll);
&mov (&DWP(8,$inp),$Zlh);
&mov (&DWP(4,$inp),$Zhl);
&mov (&DWP(0,$inp),$Zhh);
&function_end("gcm_gmult_4bit".$suffix);

&function_begin("gcm_ghash_4bit".$suffix);
&stack_push(16+4+1); # +1 for 64-bit alignment
&mov ($Zll,&wparam(0)); # load Xi
&mov ($Htbl,&wparam(1)); # load Htable
&mov ($inp,&wparam(2)); # load in
&mov ("ecx",&wparam(3)); # load len
&lea ("ecx",&DWP(0,"ecx",$inp)); # len to point at the end of input
&mov (&wparam(3),"ecx");

&mov ($Zhh,&DWP(0,$Zll)); # load Xi[16]
&mov ($Zhl,&DWP(4,$Zll));
&mov ($Zlh,&DWP(8,$Zll));
&mov ($Zll,&DWP(12,$Zll));

&deposit_rem_4bit(16);

&set_label("x86_outer_loop",16);
&xor ($Zll,&DWP(12,$inp)); # xor with input
&xor ($Zlh,&DWP(8,$inp));
&xor ($Zhl,&DWP(4,$inp));
&xor ($Zhh,&DWP(0,$inp));
&mov (&DWP(12,"esp"),$Zll); # dump it on stack
&mov (&DWP(8,"esp"),$Zlh);
&mov (&DWP(4,"esp"),$Zhl);
&mov (&DWP(0,"esp"),$Zhh);

&call ("_x86_gmult_4bit_inner");

&mov ($inp,&wparam(2));
&lea ($inp,&DWP(16,$inp));
&cmp ($inp,&wparam(3));
&mov (&wparam(2),$inp) if (!$unroll);
&jb (&label("x86_outer_loop"));

&mov ($inp,&wparam(0)); # load Xi
&mov (&DWP(12,$inp),$Zll);
&mov (&DWP(8,$inp),$Zlh);
&mov (&DWP(4,$inp),$Zhl);
&mov (&DWP(0,$inp),$Zhh);
&function_end("gcm_ghash_4bit".$suffix);

&static_label("rem_4bit");
# MMX version performs 2.8 times better on P4 (see comment in non-MMX
# routine for further details), 40% better on Opteron and Core2, 50%
# better on PIII... In other words, the effort is considered to be
# well spent...
sub mmx_loop {
my $inp = shift;
my $rem_4bit = shift;

my ($Zlo,$Zhi) = ("mm0","mm1");
&xor ($nlo,$nlo); # avoid partial register stalls on PIII
&mov (&LB($nlo),&LB($nhi));
&movq ($Zlo,&QWP(8,$Htbl,$nlo));
&movq ($Zhi,&QWP(0,$Htbl,$nlo));

&jmp (&label("mmx_loop"));
&set_label("mmx_loop",16);
&pxor ($Zlo,&QWP(8,$Htbl,$nhi));
&mov (&LB($nlo),&BP(0,$inp,$cnt));
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&pxor ($Zhi,&QWP(0,$Htbl,$nhi));
&js (&label("mmx_break"));
&pxor ($Zlo,&QWP(8,$Htbl,$nlo));
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&pxor ($Zhi,&QWP(0,$Htbl,$nlo));
&jmp (&label("mmx_loop"));

&set_label("mmx_break",16);
&pxor ($Zlo,&QWP(8,$Htbl,$nlo));
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&pxor ($Zhi,&QWP(0,$Htbl,$nlo));
&pxor ($Zlo,&QWP(8,$Htbl,$nhi));
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&pxor ($Zhi,&QWP(0,$Htbl,$nhi));

&psrlq ($Zlo,32); # lower part of Zlo is already there
&function_begin("gcm_gmult_4bit_mmx");
&mov ($inp,&wparam(0)); # load Xi
&mov ($Htbl,&wparam(1)); # load Htable

&call (&label("pic_point"));
&set_label("pic_point");
&lea ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));

&movz ($Zll,&BP(15,$inp));

&mmx_loop($inp,"eax");

&mov (&DWP(12,$inp),$Zll);
&mov (&DWP(4,$inp),$Zhl);
&mov (&DWP(8,$inp),$Zlh);
&mov (&DWP(0,$inp),$Zhh);
&function_end("gcm_gmult_4bit_mmx");
# Streamed version performs 20% better on P4, 7% on Opteron,
# 10% on Core2 and PIII...
&function_begin("gcm_ghash_4bit_mmx");
&mov ($Zhh,&wparam(0)); # load Xi
&mov ($Htbl,&wparam(1)); # load Htable
&mov ($inp,&wparam(2)); # load in
&mov ($Zlh,&wparam(3)); # load len

&call (&label("pic_point"));
&set_label("pic_point");
&lea ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));

&lea ($Zlh,&DWP(0,$inp,$Zlh));
&mov (&wparam(3),$Zlh); # len to point at the end of input
&stack_push(4+1); # +1 for stack alignment

&mov ($Zll,&DWP(12,$Zhh)); # load Xi[16]
&mov ($Zhl,&DWP(4,$Zhh));
&mov ($Zlh,&DWP(8,$Zhh));
&mov ($Zhh,&DWP(0,$Zhh));
&jmp (&label("mmx_outer_loop"));

&set_label("mmx_outer_loop",16);
&xor ($Zll,&DWP(12,$inp));
&xor ($Zhl,&DWP(4,$inp));
&xor ($Zlh,&DWP(8,$inp));
&xor ($Zhh,&DWP(0,$inp));
&mov (&DWP(12,"esp"),$Zll);
&mov (&DWP(4,"esp"),$Zhl);
&mov (&DWP(8,"esp"),$Zlh);
&mov (&DWP(0,"esp"),$Zhh);

&mmx_loop("esp","eax");

&lea ($inp,&DWP(16,$inp));
&cmp ($inp,&wparam(3));
&jb (&label("mmx_outer_loop"));

&mov ($inp,&wparam(0)); # load Xi
&mov (&DWP(12,$inp),$Zll);
&mov (&DWP(4,$inp),$Zhl);
&mov (&DWP(8,$inp),$Zlh);
&mov (&DWP(0,$inp),$Zhh);

&function_end("gcm_ghash_4bit_mmx");
######################################################################

($Xi,$Xhi)=("xmm0","xmm1"); $Hkey="xmm2";
($T1,$T2,$T3)=("xmm3","xmm4","xmm5");
($Xn,$Xhn)=("xmm6","xmm7");

&static_label("bswap");
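# The 64x64-bit multiplications below rely on the Karatsuba identity;
# with X=Xh:Xl, H=Hh:Hl and addition in GF(2) being xor,
#
#	X*H = (Xh*Hh)<<128 ^ Xl*Hl ^
#	      ((Xh^Xl)*(Hh^Hl) ^ Xh*Hh ^ Xl*Hl)<<64
#
# which is why three pclmulqdq instructions suffice instead of four.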
sub clmul64x64_T2 { # minimal "register" pressure
my ($Xhi,$Xi,$Hkey)=@_;

&movdqa ($Xhi,$Xi);
&pshufd ($T1,$Xi,0b01001110);
&pshufd ($T2,$Hkey,0b01001110);
&pxor ($T1,$Xi); # Xh^Xl
&pxor ($T2,$Hkey); # Hh^Hl

&pclmulqdq ($Xi,$Hkey,0x00); ####### Xl*Hl
&pclmulqdq ($Xhi,$Hkey,0x11); ####### Xh*Hh
&pclmulqdq ($T1,$T2,0x00); ####### (Xh^Xl)*(Hh^Hl)
sub clmul64x64_T3 {
# Even though this subroutine offers visually better ILP, it was
# empirically found to be a tad slower than the above version. At
# least in the gcm_ghash_clmul context. But it's just as well,
# because loop modulo-scheduling is possible only thanks to
# minimized "register" pressure...
my ($Xhi,$Xi,$Hkey)=@_;

&pclmulqdq ($Xi,$Hkey,0x00); #######
&pclmulqdq ($Xhi,$Hkey,0x11); #######
&pshufd ($T2,$T1,0b01001110); #
&pshufd ($T3,$Hkey,0b01001110);
&pclmulqdq ($T2,$T3,0x00); #######
if (1) { # Algorithm 9 with <<1 twist.
# Reduction is shorter and uses only two
# temporary registers, which makes it a better
# candidate for interleaving with 64x64
# multiplication. The pre-modulo-scheduled loop
# was found to be ~20% faster than Algorithm 5
# below. Algorithm 9 was then chosen and
# optimized further...
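# [The "<<1 twist" means that H is multiplied by x once and for all
# in gcm_init_clmul below (note the H<<=1 and the conditional xor of
# 0x1c2_polynomial there), which is what lets the reduction work with
# the 0x1c2 constant directly.]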
sub reduction_alg9 { # 17/13 times faster than Intel version

&function_begin_B("gcm_init_clmul");
&mov ($Htbl,&wparam(0));
&mov ($Xip,&wparam(1));

&call (&label("pic"));
&lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));

&movdqu ($Hkey,&QWP(0,$Xip));
&pshufd ($Hkey,$Hkey,0b01001110); # dword swap

&pshufd ($T2,$Hkey,0b11111111); # broadcast uppermost dword
&pcmpgtd ($T3,$T2); # broadcast carry bit
&por ($Hkey,$T1); # H<<=1

&pand ($T3,&QWP(16,$const)); # 0x1c2_polynomial
&pxor ($Hkey,$T3); # if(carry) H^=0x1c2_polynomial
&clmul64x64_T2 ($Xhi,$Xi,$Hkey);
&reduction_alg9 ($Xhi,$Xi);

&movdqu (&QWP(0,$Htbl),$Hkey); # save H
&movdqu (&QWP(16,$Htbl),$Xi); # save H^2

&function_end_B("gcm_init_clmul");

&function_begin_B("gcm_gmult_clmul");
&mov ($Xip,&wparam(0));
&mov ($Htbl,&wparam(1));

&call (&label("pic"));
&lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));

&movdqu ($Xi,&QWP(0,$Xip));
&movdqa ($T3,&QWP(0,$const));
&movdqu ($Hkey,&QWP(0,$Htbl));

&clmul64x64_T2 ($Xhi,$Xi,$Hkey);
&reduction_alg9 ($Xhi,$Xi);

&movdqu (&QWP(0,$Xip),$Xi);

&function_end_B("gcm_gmult_clmul");

&function_begin("gcm_ghash_clmul");
&mov ($Xip,&wparam(0));
&mov ($Htbl,&wparam(1));
&mov ($inp,&wparam(2));
&mov ($len,&wparam(3));

&call (&label("pic"));
&lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));

&movdqu ($Xi,&QWP(0,$Xip));
&movdqa ($T3,&QWP(0,$const));
&movdqu ($Hkey,&QWP(0,$Htbl));

&jz (&label("odd_tail"));

# Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
#        [(H*Ii+1) + (H*Xi+1)] mod P =
#        [(H*Ii+1) + H^2*(Ii+Xi)] mod P
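# [The last step holds because Xi+1 = H*(Ii+Xi) mod P from the
# previous iteration, so H*Xi+1 = H^2*(Ii+Xi) mod P; this is the
# aggregated reduction with Naggr=2 discussed at the top.]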
&movdqu ($T1,&QWP(0,$inp)); # Ii
&movdqu ($Xn,&QWP(16,$inp)); # Ii+1
&pxor ($Xi,$T1); # Ii+Xi

&clmul64x64_T2 ($Xhn,$Xn,$Hkey); # H*Ii+1
&movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2

&lea ($inp,&DWP(32,$inp)); # i+=2
&jbe (&label("even_tail"));

&set_label("mod_loop");
&clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi)
&movdqu ($T1,&QWP(0,$inp)); # Ii
&movdqu ($Hkey,&QWP(0,$Htbl)); # load H

&pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)
&movdqu ($Xn,&QWP(16,$inp)); # Ii+1

&movdqa ($T3,$Xn); #&clmul64x64_TX ($Xhn,$Xn,$Hkey); H*Ii+1
&pxor ($Xhi,$T1); # "Ii+Xi", consume early
&movdqa ($T1,$Xi); #&reduction_alg9($Xhi,$Xi); 1st phase
&pclmulqdq ($Xn,$Hkey,0x00); #######
&pshufd ($T1,$T3,0b01001110);
&pshufd ($T3,$Hkey,0b01001110);
&pclmulqdq ($Xhn,$Hkey,0x11); #######
&movdqa ($T2,$Xi); # 2nd phase
&pclmulqdq ($T1,$T3,0x00); #######
&movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2

&movdqa ($T3,&QWP(0,$const));
&lea ($inp,&DWP(32,$inp));
&ja (&label("mod_loop"));

&set_label("even_tail");
&clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi)

&pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)

&reduction_alg9 ($Xhi,$Xi);

&jnz (&label("done"));

&movdqu ($Hkey,&QWP(0,$Htbl)); # load H
&set_label("odd_tail");
&movdqu ($T1,&QWP(0,$inp)); # Ii
&pxor ($Xi,$T1); # Ii+Xi

&clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H*(Ii+Xi)
&reduction_alg9 ($Xhi,$Xi);

&movdqu (&QWP(0,$Xip),$Xi);
&function_end("gcm_ghash_clmul");
} else { # Algorithm 5. Kept for reference purposes.
sub reduction_alg5 { # 19/16 times faster than Intel version

&function_begin_B("gcm_init_clmul");
&mov ($Htbl,&wparam(0));
&mov ($Xip,&wparam(1));

&call (&label("pic"));
&lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));

&movdqu ($Hkey,&QWP(0,$Xip));
&pshufd ($Hkey,$Hkey,0b01001110); # dword swap

&clmul64x64_T3 ($Xhi,$Xi,$Hkey);
&reduction_alg5 ($Xhi,$Xi);

&movdqu (&QWP(0,$Htbl),$Hkey); # save H
&movdqu (&QWP(16,$Htbl),$Xi); # save H^2

&function_end_B("gcm_init_clmul");

&function_begin_B("gcm_gmult_clmul");
&mov ($Xip,&wparam(0));
&mov ($Htbl,&wparam(1));

&call (&label("pic"));
&lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));

&movdqu ($Xi,&QWP(0,$Xip));
&movdqa ($Xn,&QWP(0,$const));
&movdqu ($Hkey,&QWP(0,$Htbl));

&clmul64x64_T3 ($Xhi,$Xi,$Hkey);
&reduction_alg5 ($Xhi,$Xi);

&movdqu (&QWP(0,$Xip),$Xi);

&function_end_B("gcm_gmult_clmul");

&function_begin("gcm_ghash_clmul");
&mov ($Xip,&wparam(0));
&mov ($Htbl,&wparam(1));
&mov ($inp,&wparam(2));
&mov ($len,&wparam(3));

&call (&label("pic"));
&lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));

&movdqu ($Xi,&QWP(0,$Xip));
&movdqa ($T3,&QWP(0,$const));
&movdqu ($Hkey,&QWP(0,$Htbl));

&jz (&label("odd_tail"));

# Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
#        [(H*Ii+1) + (H*Xi+1)] mod P =
#        [(H*Ii+1) + H^2*(Ii+Xi)] mod P

&movdqu ($T1,&QWP(0,$inp)); # Ii
&movdqu ($Xn,&QWP(16,$inp)); # Ii+1
&pxor ($Xi,$T1); # Ii+Xi

&clmul64x64_T3 ($Xhn,$Xn,$Hkey); # H*Ii+1
&movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2

&lea ($inp,&DWP(32,$inp)); # i+=2
&jbe (&label("even_tail"));

&set_label("mod_loop");
&clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi)
&movdqu ($Hkey,&QWP(0,$Htbl)); # load H

&pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)

&reduction_alg5 ($Xhi,$Xi);

&movdqa ($T3,&QWP(0,$const));
&movdqu ($T1,&QWP(0,$inp)); # Ii
&movdqu ($Xn,&QWP(16,$inp)); # Ii+1
&pxor ($Xi,$T1); # Ii+Xi

&clmul64x64_T3 ($Xhn,$Xn,$Hkey); # H*Ii+1
&movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2

&lea ($inp,&DWP(32,$inp));
&ja (&label("mod_loop"));

&set_label("even_tail");
&clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi)

&pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)

&reduction_alg5 ($Xhi,$Xi);

&movdqa ($T3,&QWP(0,$const));

&jnz (&label("done"));

&movdqu ($Hkey,&QWP(0,$Htbl)); # load H
&set_label("odd_tail");
&movdqu ($T1,&QWP(0,$inp)); # Ii
&pxor ($Xi,$T1); # Ii+Xi

&clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H*(Ii+Xi)
&reduction_alg5 ($Xhi,$Xi);

&movdqa ($T3,&QWP(0,$const));

&movdqu (&QWP(0,$Xip),$Xi);
&function_end("gcm_ghash_clmul");
&set_label("bswap",64);
&data_byte(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
&data_byte(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2); # 0x1c2_polynomial

&set_label("rem_4bit",64);
&data_word(0,0x0000<<16,0,0x1C20<<16,0,0x3840<<16,0,0x2460<<16);
&data_word(0,0x7080<<16,0,0x6CA0<<16,0,0x48C0<<16,0,0x54E0<<16);
&data_word(0,0xE100<<16,0,0xFD20<<16,0,0xD940<<16,0,0xC560<<16);
&data_word(0,0x9180<<16,0,0x8DA0<<16,0,0xA9C0<<16,0,0xB5E0<<16);
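# [These are the same rem_4bit constants that deposit_rem_4bit
# composes on the stack for the x86 path, here laid out as sixteen
# 64-bit entries so that the MMX code can index the table with
# &QWP(0,$rem_4bit,$rem,8).]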
&asciz("GHASH for x86, CRYPTOGAMS by <appro\@openssl.org>");