Merge branch 'master' of git.openssl.org:openssl
[openssl.git] / crypto / modes / asm / ghash-c64xplus.pl
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # December 2011
11 #
12 # The module implements GCM GHASH function and underlying single
13 # multiplication operation in GF(2^128). Even though subroutines
14 # have _4bit suffix, they are not using any tables, but rely on
15 # hardware Galois Field Multiply support. Streamed GHASH processes
16 # byte in ~7 cycles, which is >6x faster than "4-bit" table-driven
17 # code compiled with TI's cl6x 6.0 with -mv6400+ -o2 flags. We are
18 # comparing apples vs. oranges, but compiler surely could have done
19 # better, because theoretical [though not necessarily achievable]
20 # estimate for "4-bit" table-driven implementation is ~12 cycles.
21
# Scan the argument list for the first token that looks like a file name
# (flavour/option flags that precede it are skipped) and redirect STDOUT
# there, so every subsequent print of $code lands in the output .s file.
# Three-arg open avoids mode injection via the file name, and a failed
# open must be fatal — silently generating nothing is worse than dying.
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT, '>', $output or die "can't open $output: $!";
24
# Register allocation for the generated C64x+ code.  Names are Perl scalars
# holding register strings that get interpolated into the heredocs below.
# Operands are split across the A- and B-register files so paired ("||")
# instructions can draw from both sides.
($Xip,$Htable,$inp,$len)=("A4","B4","A6","B6"); # arguments

# Z0..Z3: 128-bit accumulator; H0..H3: hash key H; H0x..H3x: products
# (also reused as input pre-fetch registers in the ghash loop).
($Z0,$Z1,$Z2,$Z3,       $H0, $H1, $H2, $H3,
                        $H0x,$H1x,$H2x,$H3x)=map("A$_",(16..27));
# B-side counterparts used for the upper-byte partial products.
($H01u,$H01y,$H2u,$H3u, $H0y,$H1y,$H2y,$H3y,
                        $H0z,$H1z,$H2z,$H3z)=map("B$_",(16..27));
# Constants kept in registers: 0xFF000000 byte mask and 0xE1<<16
# [pre-shifted] reduction polynomial (see SHL setup in the prologue).
($FF000000,$E10000)=("B30","B31");
($xip,$x0,$x1,$xib)=map("B$_",(6..9));  # $xip zaps $len
 $xia="A9";
($rem,$res)=("B4","B5");                # $rem zaps $Htable
35
# First assembly fragment: EABI symbol aliasing, then the prologues of
# _gcm_gmult_4bit (single-block multiply, forced to one spin via B0=1) and
# _gcm_ghash_4bit (streamed hash, B0=len/16 blocks), both falling into the
# shared ghash_loop? SPLOOPD kernel emitted by the next fragment.  Note:
# ';'-comments inside this heredoc are TI-assembler comments that end up in
# the generated file — the heredoc body is program output and must not be
# edited as if it were Perl.  Despite the _4bit names, no tables are used;
# the hardware Galois multiply (XORMPY) does the work (see header comment).
$code.=<<___;
	.text

	.if	.ASSEMBLER_VERSION<7000000
	.asg	0,__TI_EABI__
	.endif
	.if	__TI_EABI__
	.asg	gcm_gmult_1bit,_gcm_gmult_1bit
	.asg	gcm_gmult_4bit,_gcm_gmult_4bit
	.asg	gcm_ghash_4bit,_gcm_ghash_4bit
	.endif

	.asg	B3,RA

	.if	0
	.global	_gcm_gmult_1bit
_gcm_gmult_1bit:
	ADDAD	$Htable,2,$Htable
	.endif
	.global	_gcm_gmult_4bit
_gcm_gmult_4bit:
	.asmfunc
	LDDW	*${Htable}[-1],$H1:$H0	; H.lo
	LDDW	*${Htable}[-2],$H3:$H2	; H.hi
||	MV	$Xip,${xip}		; reassign Xi
||	MVK	15,B1			; SPLOOPD constant

	MVK	0xE1,$E10000
||	LDBU	*++${xip}[15],$x1	; Xi[15]
	MVK	0xFF,$FF000000
||	LDBU	*--${xip},$x0		; Xi[14]
	SHL	$E10000,16,$E10000	; [pre-shifted] reduction polynomial
	SHL	$FF000000,24,$FF000000	; upper byte mask
||	BNOP	ghash_loop?
||	MVK	1,B0			; take a single spin

	PACKH2	$H0,$H1,$xia		; pack H0' and H1's upper bytes
	AND	$H2,$FF000000,$H2u	; H2's upper byte
	AND	$H3,$FF000000,$H3u	; H3's upper byte
||	SHRU	$H2u,8,$H2u
	SHRU	$H3u,8,$H3u
||	ZERO	$Z1:$Z0
	SHRU2	$xia,8,$H01u
||	ZERO	$Z3:$Z2
	.endasmfunc

	.global	_gcm_ghash_4bit
_gcm_ghash_4bit:
	.asmfunc
	LDDW	*${Htable}[-1],$H1:$H0	; H.lo
||	SHRU	$len,4,B0		; reassign len
	LDDW	*${Htable}[-2],$H3:$H2	; H.hi
||	MV	$Xip,${xip}		; reassign Xi
||	MVK	15,B1			; SPLOOPD constant

	MVK	0xE1,$E10000
|| [B0]	LDNDW	*${inp}[1],$H1x:$H0x
	MVK	0xFF,$FF000000
|| [B0]	LDNDW	*${inp}++[2],$H3x:$H2x
	SHL	$E10000,16,$E10000	; [pre-shifted] reduction polynomial
||	LDDW	*${xip}[1],$Z1:$Z0
	SHL	$FF000000,24,$FF000000	; upper byte mask
||	LDDW	*${xip}[0],$Z3:$Z2

	PACKH2	$H0,$H1,$xia		; pack H0' and H1's upper bytes
	AND	$H2,$FF000000,$H2u	; H2's upper byte
	AND	$H3,$FF000000,$H3u	; H3's upper byte
||	SHRU	$H2u,8,$H2u
	SHRU	$H3u,8,$H3u
	SHRU2	$xia,8,$H01u

|| [B0]	XOR	$H0x,$Z0,$Z0		; Xi^=inp
|| [B0]	XOR	$H1x,$Z1,$Z1
	.if	.LITTLE_ENDIAN
   [B0]	XOR	$H2x,$Z2,$Z2
|| [B0]	XOR	$H3x,$Z3,$Z3
|| [B0]	SHRU	$Z1,24,$xia		; Xi[15], avoid cross-path stall
	STDW	$Z1:$Z0,*${xip}[1]
|| [B0]	SHRU	$Z1,16,$x0		; Xi[14]
|| [B0]	ZERO	$Z1:$Z0
	.else
   [B0]	XOR	$H2x,$Z2,$Z2
|| [B0]	XOR	$H3x,$Z3,$Z3
|| [B0]	MV	$Z0,$xia		; Xi[15], avoid cross-path stall
	STDW	$Z1:$Z0,*${xip}[1]
|| [B0]	SHRU	$Z0,8,$x0		; Xi[14]
|| [B0]	ZERO	$Z1:$Z0
	.endif
	STDW	$Z3:$Z2,*${xip}[0]
|| [B0]	ZERO	$Z3:$Z2
|| [B0]	MV	$xia,$x1
   [B0]	ADDK	14,${xip}

ghash_loop?:
	SPLOOPD	6			; 6*16+7
||	MVC	B1,ILC
|| [B0]	SUB	B0,1,B0
||	ZERO	A0
||	ADD	$x1,$x1,$xib		; SHL	$x1,1,$xib
||	SHL	$x1,1,$xia
___
137 \f
138 ########____________________________
139 #  0    D2.     M1          M2      |
140 #  1            M1                  |
141 #  2            M1          M2      |
142 #  3        D1. M1          M2      |
143 #  4        S1. L1                  |
144 #  5    S2  S1x L1          D2  L2  |____________________________
145 #  6/0          L1  S1      L2  S2x |D2.     M1          M2      |
146 #  7/1          L1  S1  D1x S2  M2  |        M1                  |
147 #  8/2              S1  L1x S2      |        M1          M2      |
148 #  9/3              S1  L1x         |    D1. M1          M2      |
149 # 10/4                  D1x         |    S1. L1                  |
150 # 11/5                              |S2  S1x L1          D2  L2  |____________
151 # 12/6/0                D1x       __|        L1  S1      L2  S2x |D2.     ....
152 #    7/1                                     L1  S1  D1x S2  M2  |        ....
153 #    8/2                                         S1  L1x S2      |        ....
154 #####...                                         ................|............
# Second assembly fragment: the software-pipelined (SPLOOPD/SPKERNEL)
# multiply-and-reduce kernel shared by both entry points, followed by the
# ghash outer-loop tail — byte order fix-up under .LITTLE_ENDIAN, Xi ^= inp
# for the pre-fetched next block, store Xi back, then either branch back to
# ghash_loop? ([B0]) or return ([!B0]) — and finally the .const ident
# string.  Slot scheduling follows the pipeline diagram in the comment
# block above.  As with the first fragment, the heredoc body is generated
# assembler output and is left byte-for-byte intact.
$code.=<<___;
	XORMPY	$H0,$xia,$H0x		; 0	; H·(Xi[i]<<1)
||	XORMPY	$H01u,$xib,$H01y
|| [A0]	LDBU	*--${xip},$x0
	XORMPY	$H1,$xia,$H1x		; 1
	XORMPY	$H2,$xia,$H2x		; 2
||	XORMPY	$H2u,$xib,$H2y
	XORMPY	$H3,$xia,$H3x		; 3
||	XORMPY	$H3u,$xib,$H3y
||[!A0]	MVK.D	15,A0				; *--${xip} counter
	XOR.L	$H0x,$Z0,$Z0		; 4	; Z^=H·(Xi[i]<<1)
|| [A0]	SUB.S	A0,1,A0
	XOR.L	$H1x,$Z1,$Z1		; 5
||	AND.D	$H01y,$FF000000,$H0z
||	SWAP2.L	$H01y,$H1y		;	; SHL	$H01y,16,$H1y
||	SHL	$x0,1,$xib
||	SHL	$x0,1,$xia

	XOR.L	$H2x,$Z2,$Z2		; 6/0	; [0,0] in epilogue
||	SHL	$Z0,1,$rem		;	; rem=Z<<1
||	SHRMB.S	$Z1,$Z0,$Z0		;	; Z>>=8
||	AND.L	$H1y,$FF000000,$H1z
	XOR.L	$H3x,$Z3,$Z3		; 7/1
||	SHRMB.S	$Z2,$Z1,$Z1
||	XOR.D	$H0z,$Z0,$Z0			; merge upper byte products
||	AND.S	$H2y,$FF000000,$H2z
||	XORMPY	$E10000,$rem,$res	;	; implicit rem&0x1FE
	XOR.L	$H1z,$Z1,$Z1		; 8/2
||	SHRMB.S	$Z3,$Z2,$Z2
||	AND.S	$H3y,$FF000000,$H3z
	XOR.L	$H2z,$Z2,$Z2		; 9/3
||	SHRU	$Z3,8,$Z3
	XOR.D	$H3z,$Z3,$Z3		; 10/4
	NOP				; 11/5

	SPKERNEL 0,2
||	XOR.D	$res,$Z3,$Z3		; 12/6/0; Z^=res

	; input pre-fetch is possible where D1 slot is available...
   [B0]	LDNDW	*${inp}[1],$H1x:$H0x	; 8/-
   [B0]	LDNDW	*${inp}++[2],$H3x:$H2x	; 9/-
	NOP				; 10/-
	.if	.LITTLE_ENDIAN
	SWAP2	$Z0,$Z1			; 11/-
||	SWAP4	$Z1,$Z0
	SWAP4	$Z1,$Z1			; 12/-
||	SWAP2	$Z0,$Z0
	SWAP2	$Z2,$Z3
||	SWAP4	$Z3,$Z2
||[!B0]	BNOP	RA
	SWAP4	$Z3,$Z3
||	SWAP2	$Z2,$Z2
|| [B0]	BNOP	ghash_loop?
   [B0]	XOR	$H0x,$Z0,$Z0		; Xi^=inp
|| [B0]	XOR	$H1x,$Z1,$Z1
   [B0]	XOR	$H2x,$Z2,$Z2
|| [B0]	XOR	$H3x,$Z3,$Z3
|| [B0]	SHRU	$Z1,24,$xia		; Xi[15], avoid cross-path stall
	STDW	$Z1:$Z0,*${xip}[1]
|| [B0]	SHRU	$Z1,16,$x0		; Xi[14]
|| [B0]	ZERO	$Z1:$Z0
	.else
  [!B0]	BNOP	RA			; 11/-
   [B0]	BNOP	ghash_loop?		; 12/-
   [B0]	XOR	$H0x,$Z0,$Z0		; Xi^=inp
|| [B0]	XOR	$H1x,$Z1,$Z1
   [B0]	XOR	$H2x,$Z2,$Z2
|| [B0]	XOR	$H3x,$Z3,$Z3
|| [B0]	MV	$Z0,$xia		; Xi[15], avoid cross-path stall
	STDW	$Z1:$Z0,*${xip}[1]
|| [B0]	SHRU	$Z0,8,$x0		; Xi[14]
|| [B0]	ZERO	$Z1:$Z0
	.endif
	STDW	$Z3:$Z2,*${xip}[0]
|| [B0]	ZERO	$Z3:$Z2
|| [B0]	MV	$xia,$x1
   [B0]	ADDK	14,${xip}
	.endasmfunc

	.sect	.const
	.cstring "GHASH for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
	.align	4
___
238
# Flush the accumulated assembly to STDOUT (redirected to the output file
# above).  close must be checked: STDOUT is buffered, so a disk-full or
# similar write error only surfaces here — swallowing it would leave a
# truncated .s file behind with a success exit status.
print $code;
close STDOUT or die "error closing STDOUT: $!";