#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# December 2011
#
# The module implements the GCM GHASH function and the underlying
# single multiplication operation in GF(2^128). Even though the
# subroutines have a _4bit suffix, they do not use any tables, but
# rely on hardware Galois Field Multiply support. Streamed GHASH
# processes one byte in ~7 cycles, which is >6x faster than "4-bit"
# table-driven code compiled with TI's cl6x 6.0 with -mv6400+ -o2
# flags. We are comparing apples vs. oranges, but the compiler surely
# could have done better, because the theoretical [though not
# necessarily achievable] estimate for a "4-bit" table-driven
# implementation is ~12 cycles per byte.
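#
# For reference, the operation being accelerated is the GF(2^128)
# multiplication of NIST SP 800-38D. Below is a minimal bit-by-bit
# sketch of it in plain Perl; gf128_mult_ref is a hypothetical helper,
# not part of the generator or of the emitted assembly, and is never
# called. Operands are Math::BigInt values holding the 128-bit blocks
# in GCM bit order (the spec's first bit is the integer's bit 127).

use Math::BigInt;

sub gf128_mult_ref {
        my ($x,$h) = @_;
        my $R = Math::BigInt->new("0xE1")->blsft(120);  # reduction polynomial
        my $z = Math::BigInt->bzero();                  # accumulator Z
        my $v = $h->copy();                             # shift register V
        for (my $i=127; $i>=0; $i--) {                  # x's bits, MSB first
                $z->bxor($v) if $x->copy()->brsft($i)->band(1)->is_one();
                my $carry = $v->copy()->band(1)->is_one();
                $v->brsft(1);                           # V >>= 1
                $v->bxor($R) if $carry;                 # fold out-shifted bit
        }
        return $z;
}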

while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output" or die "can't open $output: $!";

($Xip,$Htable,$inp,$len)=("A4","B4","A6","B6"); # arguments

($Z0,$Z1,$Z2,$Z3,       $H0, $H1, $H2, $H3,
                        $H0x,$H1x,$H2x,$H3x)=map("A$_",(16..27));
($H01u,$H01y,$H2u,$H3u, $H0y,$H1y,$H2y,$H3y,
                        $H0z,$H1z,$H2z,$H3z)=map("B$_",(16..27));
($FF000000,$E10000)=("B30","B31");
($xip,$x0,$x1,$xib)=map("B$_",(6..9));  # $xip zaps $len
 $xia="A9";
($rem,$res)=("B4","B5");                # $rem zaps $Htable

$code.=<<___;
        .text
        .if     __TI_EABI__
        .asg    gcm_gmult_1bit,_gcm_gmult_1bit
        .asg    gcm_gmult_4bit,_gcm_gmult_4bit
        .asg    gcm_ghash_4bit,_gcm_ghash_4bit
        .endif

        .asg    B3,RA

        .if     0
        .global _gcm_gmult_1bit
_gcm_gmult_1bit:
        ADDAD   $Htable,2,$Htable
        .endif
        .global _gcm_gmult_4bit
_gcm_gmult_4bit:
        .asmfunc
        LDDW    *${Htable}[-1],$H1:$H0  ; H.lo
        LDDW    *${Htable}[-2],$H3:$H2  ; H.hi
||      MV      $Xip,${xip}             ; reassign Xi
||      MVK     15,B1                   ; SPLOOPD constant

        MVK     0xE1,$E10000
||      LDBU    *++${xip}[15],$x1       ; Xi[15]
        MVK     0xFF,$FF000000
||      LDBU    *--${xip},$x0           ; Xi[14]
        SHL     $E10000,16,$E10000      ; [pre-shifted] reduction polynomial
        SHL     $FF000000,24,$FF000000  ; upper byte mask
||      BNOP    ghash_loop?
||      MVK     1,B0                    ; take a single spin

        PACKH2  $H0,$H1,$xia            ; pack H0' and H1's upper bytes
        AND     $H2,$FF000000,$H2u      ; H2's upper byte
        AND     $H3,$FF000000,$H3u      ; H3's upper byte
||      SHRU    $H2u,8,$H2u
        SHRU    $H3u,8,$H3u
||      ZERO    $Z1:$Z0
        SHRU2   $xia,8,$H01u
||      ZERO    $Z3:$Z2
        .endasmfunc

        .global _gcm_ghash_4bit
_gcm_ghash_4bit:
        .asmfunc
        LDDW    *${Htable}[-1],$H1:$H0  ; H.lo
||      SHRU    $len,4,B0               ; reassign len
        LDDW    *${Htable}[-2],$H3:$H2  ; H.hi
||      MV      $Xip,${xip}             ; reassign Xi
||      MVK     15,B1                   ; SPLOOPD constant

        MVK     0xE1,$E10000
|| [B0] LDNDW   *${inp}[1],$H1x:$H0x
        MVK     0xFF,$FF000000
|| [B0] LDNDW   *${inp}++[2],$H3x:$H2x
        SHL     $E10000,16,$E10000      ; [pre-shifted] reduction polynomial
||      LDDW    *${xip}[1],$Z1:$Z0
        SHL     $FF000000,24,$FF000000  ; upper byte mask
||      LDDW    *${xip}[0],$Z3:$Z2

        PACKH2  $H0,$H1,$xia            ; pack H0' and H1's upper bytes
        AND     $H2,$FF000000,$H2u      ; H2's upper byte
        AND     $H3,$FF000000,$H3u      ; H3's upper byte
||      SHRU    $H2u,8,$H2u
        SHRU    $H3u,8,$H3u
||      SHRU2   $xia,8,$H01u

   [B0] XOR     $H0x,$Z0,$Z0            ; Xi^=inp
|| [B0] XOR     $H1x,$Z1,$Z1
        .if     .LITTLE_ENDIAN
   [B0] XOR     $H2x,$Z2,$Z2
|| [B0] XOR     $H3x,$Z3,$Z3
|| [B0] SHRU    $Z1,24,$xia             ; Xi[15], avoid cross-path stall
        STDW    $Z1:$Z0,*${xip}[1]
|| [B0] SHRU    $Z1,16,$x0              ; Xi[14]
|| [B0] ZERO    $Z1:$Z0
        .else
   [B0] XOR     $H2x,$Z2,$Z2
|| [B0] XOR     $H3x,$Z3,$Z3
|| [B0] MV      $Z0,$xia                ; Xi[15], avoid cross-path stall
        STDW    $Z1:$Z0,*${xip}[1]
|| [B0] SHRU    $Z0,8,$x0               ; Xi[14]
|| [B0] ZERO    $Z1:$Z0
        .endif
        STDW    $Z3:$Z2,*${xip}[0]
|| [B0] ZERO    $Z3:$Z2
|| [B0] MV      $xia,$x1
   [B0] ADDK    14,${xip}

ghash_loop?:
        SPLOOPD 6                       ; 6*16+7
||      MVC     B1,ILC
|| [B0] SUB     B0,1,B0
||      ZERO    A0
||      ADD     $x1,$x1,$xib            ; SHL   $x1,1,$xib
||      SHL     $x1,1,$xia
___

########____________________________
#  0    D2.     M1          M2      |
#  1            M1                  |
#  2            M1          M2      |
#  3        D1. M1          M2      |
#  4        S1. L1                  |
#  5    S2  S1x L1          D2  L2  |____________________________
#  6/0          L1  S1      L2  S2x |D2.     M1          M2      |
#  7/1          L1  S1  D1x S2  M2  |        M1                  |
#  8/2              S1  L1x S2      |        M1          M2      |
#  9/3              S1  L1x         |    D1. M1          M2      |
# 10/4                  D1x         |    S1. L1                  |
# 11/5                              |S2  S1x L1          D2  L2  |____________
# 12/6/0                D1x       __|        L1  S1      L2  S2x |D2.     ....
#    7/1                                     L1  S1  D1x S2  M2  |        ....
#    8/2                                         S1  L1x S2      |        ....
#####...                                         ................|............
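#
# In rough terms, each spin of the SPLOOP kernel below folds one byte
# of Xi into the accumulator: the XORMPYs compute H·(Xi[i]<<1), with
# the $H*y products catching the upper-byte spill-over; the result is
# XOR-ed into Z; Z is then shifted right by one byte, and the byte
# shifted out, rem, is folded back in via the pre-shifted reduction
# polynomial, res = 0xE10000·rem.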
$code.=<<___;
        XORMPY  $H0,$xia,$H0x           ; 0     ; H·(Xi[i]<<1)
||      XORMPY  $H01u,$xib,$H01y
|| [A0] LDBU    *--${xip},$x0
        XORMPY  $H1,$xia,$H1x           ; 1
        XORMPY  $H2,$xia,$H2x           ; 2
||      XORMPY  $H2u,$xib,$H2y
        XORMPY  $H3,$xia,$H3x           ; 3
||      XORMPY  $H3u,$xib,$H3y
||[!A0] MVK.D   15,A0                           ; *--${xip} counter
        XOR.L   $H0x,$Z0,$Z0            ; 4     ; Z^=H·(Xi[i]<<1)
|| [A0] SUB.S   A0,1,A0
        XOR.L   $H1x,$Z1,$Z1            ; 5
||      AND.D   $H01y,$FF000000,$H0z
||      SWAP2.L $H01y,$H1y              ;       ; SHL   $H01y,16,$H1y
||      SHL     $x0,1,$xib
||      SHL     $x0,1,$xia

        XOR.L   $H2x,$Z2,$Z2            ; 6/0   ; [0,0] in epilogue
||      SHL     $Z0,1,$rem              ;       ; rem=Z<<1
||      SHRMB.S $Z1,$Z0,$Z0             ;       ; Z>>=8
||      AND.L   $H1y,$FF000000,$H1z
        XOR.L   $H3x,$Z3,$Z3            ; 7/1
||      SHRMB.S $Z2,$Z1,$Z1
||      XOR.D   $H0z,$Z0,$Z0                    ; merge upper byte products
||      AND.S   $H2y,$FF000000,$H2z
||      XORMPY  $E10000,$rem,$res       ;       ; implicit rem&0x1FE
        XOR.L   $H1z,$Z1,$Z1            ; 8/2
||      SHRMB.S $Z3,$Z2,$Z2
||      AND.S   $H3y,$FF000000,$H3z
        XOR.L   $H2z,$Z2,$Z2            ; 9/3
||      SHRU    $Z3,8,$Z3
        XOR.D   $H3z,$Z3,$Z3            ; 10/4
        NOP                             ; 11/5

        SPKERNEL 0,2
||      XOR.D   $res,$Z3,$Z3            ; 12/6/0; Z^=res

        ; input pre-fetch is possible where D1 slot is available...
   [B0] LDNDW   *${inp}[1],$H1x:$H0x    ; 8/-
   [B0] LDNDW   *${inp}++[2],$H3x:$H2x  ; 9/-
        NOP                             ; 10/-
        .if     .LITTLE_ENDIAN
        SWAP2   $Z0,$Z1                 ; 11/-
||      SWAP4   $Z1,$Z0
        SWAP4   $Z1,$Z1                 ; 12/-
||      SWAP2   $Z0,$Z0
        SWAP2   $Z2,$Z3
||      SWAP4   $Z3,$Z2
||[!B0] BNOP    RA
        SWAP4   $Z3,$Z3
||      SWAP2   $Z2,$Z2
|| [B0] BNOP    ghash_loop?
   [B0] XOR     $H0x,$Z0,$Z0            ; Xi^=inp
|| [B0] XOR     $H1x,$Z1,$Z1
   [B0] XOR     $H2x,$Z2,$Z2
|| [B0] XOR     $H3x,$Z3,$Z3
|| [B0] SHRU    $Z1,24,$xia             ; Xi[15], avoid cross-path stall
        STDW    $Z1:$Z0,*${xip}[1]
|| [B0] SHRU    $Z1,16,$x0              ; Xi[14]
|| [B0] ZERO    $Z1:$Z0
        .else
  [!B0] BNOP    RA                      ; 11/-
   [B0] BNOP    ghash_loop?             ; 12/-
   [B0] XOR     $H0x,$Z0,$Z0            ; Xi^=inp
|| [B0] XOR     $H1x,$Z1,$Z1
   [B0] XOR     $H2x,$Z2,$Z2
|| [B0] XOR     $H3x,$Z3,$Z3
|| [B0] MV      $Z0,$xia                ; Xi[15], avoid cross-path stall
        STDW    $Z1:$Z0,*${xip}[1]
|| [B0] SHRU    $Z0,8,$x0               ; Xi[14]
|| [B0] ZERO    $Z1:$Z0
        .endif
        STDW    $Z3:$Z2,*${xip}[0]
|| [B0] ZERO    $Z3:$Z2
|| [B0] MV      $xia,$x1
   [B0] ADDK    14,${xip}
        .endasmfunc

        .sect   .const
        .cstring "GHASH for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
        .align  4
___

print $code;
close STDOUT;