Consistently use arm_arch.h constants in armcap assembly code.
[openssl.git] / crypto / poly1305 / asm / poly1305-c64xplus.pl
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # Poly1305 hash for C64x+.
11 #
12 # October 2015
13 #
14 # Performance is [incredible for a 32-bit processor] 1.76 cycles per
15 # processed byte. Comparison to compiler-generated code is problematic,
16 # because results were observed to vary from 2.1 to 7.6 cpb depending
17 # on compiler's ability to inline small functions. Compiler also
18 # disables interrupts for some reason, thus making interrupt response
19 # time dependent on input length. This module on the other hand is free
20 # from such limitation.
21
# Register-name map for the generated C64x+ code. C6x ABI passes the first
# four arguments in A4/B4/A6/B6 (here: ctx, inp, len, padbit).
22 ($CTXA,$INPB,$LEN,$PADBIT)=("A4","B4","A6","B6");
# h0..h4 - the five 32-bit limbs of the 130-bit accumulator; $H4a aliases
# $LEN's register (reused once the block count has been extracted).
23 ($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
# d0..d3 - carry halves paired with h0..h3 in 40-bit ADDU results.
24 ($D0,$D1,$D2,$D3)=         ("A9","B9","A11","B11");
# r0..r3 - clamped key limbs; s1..s3 = r + (r>>2) precomputed by init;
# $S3b is a B-file copy of s3 (see the s3:s2 store in poly1305_init).
25 ($R0,$R1,$R2,$R3,$S1,$S2,$S3,$S3b)=("A0","B0","A1","B1","A12","B12","A13","B13");
# Scratch: constant 3 for the final h4 mask, plus B/A-side copies of r0, s2.
26 ($THREE,$R0b,$S2a)=("B7","B5","A5");
27
28 $code.=<<___;
29         .text
30
; Pre-7.x TI assemblers do not predefine __TI_EABI__; default it to 0 so
; the COFF (underscore-prefixed) symbol names are kept as written.
31         .if     .ASSEMBLER_VERSION<7000000
32         .asg    0,__TI_EABI__
33         .endif
; Under EABI, substitute the underscore-less names for the exported symbols.
34         .if     __TI_EABI__
35         .asg    poly1305_init,_poly1305_init
36         .asg    poly1305_blocks,_poly1305_blocks
37         .asg    poly1305_emit,_poly1305_emit
38         .endif
39
; ABI register aliases: B3 holds the return address, A15/B15 frame/stack ptr.
40         .asg    B3,RA
41         .asg    A15,FP
42         .asg    B15,SP
43
; Input is little-endian; on a little-endian core the byte-swap helpers
; degenerate to plain moves (real SWAP2/SWAP4 are used on big-endian only).
44         .if     .LITTLE_ENDIAN
45         .asg    MV,SWAP2
46         .asg    MV.L,SWAP4
47         .endif
48
; int poly1305_init(void *ctx /* A4 */, const u8 key[32] /* B4 */)
; Zeroes the accumulator limbs h0..h4 in ctx, clamps the first 16 key bytes
; into r0..r3, precomputes s1..s3 = r + (r>>2), stores all of it into ctx,
; and returns 0 (ZERO A4 below).
49         .global _poly1305_init
50 _poly1305_init:
51         .asmfunc
52         LDNDW   *${INPB}[0],B17:B16     ; load key material
53         LDNDW   *${INPB}[1],A17:A16
54
; NOTE(review): the '||' units below issue in parallel with the preceding
; LDNDW; the blank line is cosmetic only.
55 ||      ZERO    B9:B8
56 ||      MVK     -1,B0
57         STDW    B9:B8,*${CTXA}[0]       ; initialize h1:h0
58 ||      SHRU    B0,4,B0                 ; 0x0fffffff
59 ||      MVK     -4,B1
60         STDW    B9:B8,*${CTXA}[1]       ; initialize h3:h2
61 ||      AND     B0,B1,B1                ; 0x0ffffffc
62         STW     B8,*${CTXA}[4]          ; initialize h4
63
; On big-endian cores, byte-reverse each 32-bit key word (SWAP2 swaps
; halfwords, SWAP4 swaps bytes within halfwords).
64         .if     .BIG_ENDIAN
65         SWAP2   B16,B17
66 ||      SWAP2   B17,B16
67         SWAP2   A16,A17
68 ||      SWAP2   A17,A16
69         SWAP4   B16,B16
70 ||      SWAP4   A16,A16
71         SWAP4   B17,B17
72 ||      SWAP4   A17,A17
73         .endif
74
75         AND     B16,B0,B20              ; r0 = key[0] & 0x0fffffff
76 ||      AND     B17,B1,B22              ; r1 = key[1] & 0x0ffffffc
77 ||      EXTU    B17,4,6,B16             ; r1>>2
78         AND     A16,B1,B21              ; r2 = key[2] & 0x0ffffffc
79 ||      AND     A17,B1,A23              ; r3 = key[3] & 0x0ffffffc
80 ||      BNOP    RA                      ; delayed return, 5 slots below
81         SHRU    B21,2,B18
82 ||      ADD     B22,B16,B16             ; s1 = r1 + r1>>2
83
84         STDW    B21:B20,*${CTXA}[3]     ; save r2:r0
85 ||      ADD     B21,B18,B18             ; s2 = r2 + r2>>2
86 ||      SHRU    A23,2,B17
87 ||      MV      A23,B23
; s3 is computed twice on purpose: once into B19 (stored with s2) and once
; into B17 (stored with s1), so both dword pairs hold a copy.
88         STDW    B23:B22,*${CTXA}[4]     ; save r3:r1
89 ||      ADD     B23,B17,B19             ; s3 = r3 + r3>>2
90 ||      ADD     B23,B17,B17             ; s3 = r3 + r3>>2
91         STDW    B17:B16,*${CTXA}[5]     ; save s3:s1
92         STDW    B19:B18,*${CTXA}[6]     ; save s3:s2
93 ||      ZERO    A4                      ; return 0
94         .endasmfunc
95
; void poly1305_blocks(void *ctx /* A4 */, const u8 *inp /* B4 */,
;                      size_t len /* A6 */, u32 padbit /* B6 */)
; Processes len/16 full blocks: h = (h + block + padbit*2^128) * r, with h
; kept as five 32-bit limbs and carries deferred inside the loop. The loop
; is software-pipelined: the next block is loaded and byte-swapped while the
; current products are accumulated. EOL comments of the form
; "MPY32U $H0,$R1,..." give the logical operation; the actual operands use
; A/B-side copies (B31, A24, B25, ...) to balance the two register files.
96         .global _poly1305_blocks
97         .align  32
98 _poly1305_blocks:
99         .asmfunc        stack_usage(40)
100         SHRU    $LEN,4,A2               ; A2 is loop counter, number of blocks
101   [!A2] BNOP    RA                      ; no data
102 || [A2] STW     FP,*SP--(40)            ; save frame pointer and alloca(40)
103 || [A2] MV      SP,FP
104    [A2] STDW    B13:B12,*SP[4]          ; ABI says so
105 || [A2] MV      $CTXA,$S3b              ; borrow $S3b
106    [A2] STDW    B11:B10,*SP[3]
107 || [A2] STDW    A13:A12,*FP[-3]
108    [A2] STDW    A11:A10,*FP[-4]
109
110 || [A2] LDDW    *${S3b}[0],B25:B24      ; load h1:h0
111    [A2] LDNW    *${INPB}++[4],$D0       ; load inp[0]
112    [A2] LDNW    *${INPB}[-3],$D1        ; load inp[1]
113
114         LDDW    *${CTXA}[1],B29:B28     ; load h3:h2, B28 is h2
115         LDNW    *${INPB}[-2],$D2        ; load inp[2]
116         LDNW    *${INPB}[-1],$D3        ; load inp[3]
117
118         LDDW    *${CTXA}[3],$R2:$R0     ; load r2:r0
119 ||      LDDW    *${S3b}[4],$R3:$R1      ; load r3:r1
120 ||      SWAP2   $D0,$D0
121
122         LDDW    *${CTXA}[5],$S3:$S1     ; load s3:s1
123 ||      LDDW    *${S3b}[6],$S3b:$S2     ; load s3:s2
124 ||      SWAP4   $D0,$D0
125 ||      SWAP2   $D1,$D1
126
; Pre-loop: fold the first block into h0/h1 so the loop body can start
; multiplying immediately.
127         ADDU    $D0,B24,$D0:$H0         ; h0+=inp[0]
128 ||      ADD     $D0,B24,B31             ; B-copy of h0+inp[0]
129 ||      SWAP4   $D1,$D1
130         ADDU    $D1,B25,$D1:$H1         ; h1+=inp[1]
131 ||      MVK     3,$THREE
132 ||      SWAP2   $D2,$D2
133         LDW     *${CTXA}[4],$H4         ; load h4
134 ||      SWAP4   $D2,$D2
135 ||      MV      B29,B30                 ; B30 is h3
136         MV      $R0,$R0b
137
138 loop?:
139         MPY32U  $H0,$R0,A17:A16
140 ||      MPY32U  B31,$R1,B17:B16         ; MPY32U        $H0,$R1,B17:B16
141 ||      ADDU    $D0,$D1:$H1,B25:B24     ; ADDU          $D0,$D1:$H1,$D1:$H1
142 ||      ADDU    $D2,B28,$D2:$H2         ; h2+=inp[2]
143 ||      SWAP2   $D3,$D3
144         MPY32U  $H0,$R2,A19:A18
145 ||      MPY32U  B31,$R3,B19:B18         ; MPY32U        $H0,$R3,B19:B18
146 ||      ADD     $D0,$H1,A24             ; A-copy of B24
147 ||      SWAP4   $D3,$D3
148 || [A2] SUB     A2,1,A2                 ; decrement loop counter
149
150         MPY32U  A24,$S3,A21:A20         ; MPY32U        $H1,$S3,A21:A20
151 ||      MPY32U  B24,$R0b,B21:B20        ; MPY32U        $H1,$R0,B21:B20
152 ||      ADDU    B25,$D2:$H2,$D2:$H2     ; ADDU          $D1,$D2:$H2,$D2:$H2
153 ||      ADDU    $D3,B30,$D3:$H3         ; h3+=inp[3]
154 ||      ADD     B25,$H2,B25             ; B-copy of $H2
155         MPY32U  A24,$R1,A23:A22         ; MPY32U        $H1,$R1,A23:A22
156 ||      MPY32U  B24,$R2,B23:B22         ; MPY32U        $H1,$R2,B23:B22
157
158         MPY32U  $H2,$S2,A25:A24
159 ||      MPY32U  B25,$S3b,B25:B24        ; MPY32U        $H2,$S3,B25:B24
160 ||      ADDU    $D2,$D3:$H3,$D3:$H3
161 ||      ADD     $PADBIT,$H4,$H4         ; h4+=padbit
162         MPY32U  $H2,$R0,A27:A26
163 ||      MPY32U  $H2,$R1,B27:B26
164 ||      ADD     $D3,$H4,$H4
165 ||      MV      $S2,$S2a
166
; Accumulate the column sums d0..d3; the next block's words are loaded in
; spare D-unit slots under the [A2] predicate.
167         MPY32U  $H3,$S1,A29:A28
168 ||      MPY32U  $H3,$S2,B29:B28
169 ||      ADD     A21,A17,A21             ; start accumulating "d3:d0"
170 ||      ADD     B21,B17,B21
171 ||      ADDU    A20,A16,A17:A16
172 ||      ADDU    B20,B16,B17:B16
173 || [A2] LDNW    *${INPB}++[4],$D0       ; load inp[0]
174         MPY32U  $H3,$S3,A31:A30
175 ||      MPY32U  $H3,$R0b,B31:B30
176 ||      ADD     A23,A19,A23
177 ||      ADD     B23,B19,B23
178 ||      ADDU    A22,A18,A19:A18
179 ||      ADDU    B22,B18,B19:B18
180 || [A2] LDNW    *${INPB}[-3],$D1        ; load inp[1]
181
182         MPY32   $H4,$S1,B20
183 ||      MPY32   $H4,$S2a,A20
184 ||      ADD     A25,A21,A21
185 ||      ADD     B25,B21,B21
186 ||      ADDU    A24,A17:A16,A17:A16
187 ||      ADDU    B24,B17:B16,B17:B16
188 || [A2] LDNW    *${INPB}[-2],$D2        ; load inp[2]
189         MPY32   $H4,$S3b,B22
190 ||      ADD     A27,A23,A23
191 ||      ADD     B27,B23,B23
192 ||      ADDU    A26,A19:A18,A19:A18
193 ||      ADDU    B26,B19:B18,B19:B18
194 || [A2] LDNW    *${INPB}[-1],$D3        ; load inp[3]
195
196         MPY32   $H4,$R0b,$H4
197 ||      ADD     A29,A21,A21             ; final hi("d0")
198 ||      ADD     B29,B21,B21             ; final hi("d1")
199 ||      ADDU    A28,A17:A16,A17:A16     ; final lo("d0")
200 ||      ADDU    B28,B17:B16,B17:B16
201         ADD     A31,A23,A23             ; final hi("d2")
202 ||      ADD     B31,B23,B23             ; final hi("d3")
203 ||      ADDU    A30,A19:A18,A19:A18
204 ||      ADDU    B30,B19:B18,B19:B18
205         ADDU    B20,B17:B16,B17:B16     ; final lo("d1")
206 ||      ADDU    A20,A19:A18,A19:A18     ; final lo("d2")
207         ADDU    B22,B19:B18,B19:B18     ; final lo("d3")
208
; Carry propagation: fold each 64-bit column into the next limb.
209 ||      ADD     A17,A21,A21             ; "flatten" "d3:d0"
210         MV      A19,B29                 ; move to avoid cross-path stalls
211         ADDU    A21,B17:B16,B27:B26     ; B26 is h1
212         ADD     B21,B27,B27
213 ||      DMV     B29,A18,B29:B28         ; move to avoid cross-path stalls
214         ADDU    B27,B29:B28,B29:B28     ; B28 is h2
215 || [A2] SWAP2   $D0,$D0
216         ADD     A23,B29,B29
217 || [A2] SWAP4   $D0,$D0
218         ADDU    B29,B19:B18,B31:B30     ; B30 is h3
219         ADD     B23,B31,B31
220 ||      MV      A16,B24                 ; B24 is h0
221 || [A2] SWAP2   $D1,$D1
222         ADD     B31,$H4,$H4
223 || [A2] SWAP4   $D1,$D1
224
225         SHRU    $H4,2,B16               ; last reduction step
226 ||      AND     $H4,$THREE,$H4
227 || [A2] BNOP    loop?                   ; delayed branch, 5 slots below
228         ADDAW   B16,B16,B16             ; 5*(h4>>2)
229
; Fold 5*(h4>>2) back into h0 and ripple carries; in the same slots the next
; block's words are added in (under [A2]) to restart the pipeline.
230         ADDU    B24,B16,B25:B24         ; B24 is h0
231 || [A2] SWAP2   $D2,$D2
232         ADDU    B26,B25,B27:B26         ; B26 is h1
233 || [A2] SWAP4   $D2,$D2
234         ADDU    B28,B27,B29:B28         ; B28 is h2
235 || [A2] ADDU    $D0,B24,$D0:$H0         ; h0+=inp[0]
236 || [A2] ADD     $D0,B24,B31             ; B-copy of h0+inp[0]
237         ADD     B30,B29,B30             ; B30 is h3
238 || [A2] ADDU    $D1,B26,$D1:$H1         ; h1+=inp[1]
239 ;;===== branch to loop? is taken here
240
241         LDDW    *FP[-4],A11:A10         ; ABI says so
242         LDDW    *FP[-3],A13:A12
243 ||      LDDW    *SP[3],B11:B10
244         LDDW    *SP[4],B13:B12
245 ||      MV      B26,B25
246 ||      BNOP    RA
247         LDW     *++SP(40),FP            ; restore frame pointer
248 ||      MV      B30,B29
249         STDW    B25:B24,*${CTXA}[0]     ; save h1:h0
250         STDW    B29:B28,*${CTXA}[1]     ; save h3:h2
251         STW     $H4,*${CTXA}[4]         ; save h4
252         NOP     1
253         .endasmfunc
254 ___
255 {
# poly1305_emit's C arguments land in the same registers: mac pointer in B4,
# nonce pointer in A6 (B6 receives a copy so both register files can issue
# nonce loads in parallel; see the MV just below in the asm).
256 my ($MAC,$NONCEA,$NONCEB)=($INPB,$LEN,$PADBIT);
257
258 $code.=<<___;
; void poly1305_emit(void *ctx /* A4 */, u8 mac[16] /* B4 */,
;                    const u32 nonce[4] /* A6 */)
; Final reduction and tag output: compute h+5, select it over h iff the
; addition carried out of bit 129 (i.e. h >= 2^130-5), accumulate the
; 128-bit nonce, and store the 128-bit tag little-endian to mac.
259         .global _poly1305_emit
260         .align  32
261 _poly1305_emit:
262         .asmfunc
263         LDDW    *${CTXA}[0],A17:A16     ; load h1:h0
264         LDDW    *${CTXA}[1],A19:A18     ; load h3:h2
265         LDW     *${CTXA}[4],A20         ; load h4
266         MV      $NONCEA,$NONCEB
267
; Carry chain h+5 through all five limbs: A22/A24/A26/A28 hold the limbs of
; h+5, the odd registers carry into the next limb.
268         MVK     5,A22                   ; compare to modulus
269         ADDU    A16,A22,A23:A22
270 ||      LDW     *${NONCEA}[0],A8
271 ||      LDW     *${NONCEB}[1],B8
272         ADDU    A17,A23,A25:A24
273 ||      LDW     *${NONCEA}[2],A9
274 ||      LDW     *${NONCEB}[3],B9
275         ADDU    A18,A25,A27:A26         ; h2+carry (A18, not A19: h2 must
                                            ; enter the compare, its select
                                            ; below reads A26)
276         ADDU    A19,A27,A29:A28         ; h3+carry
277         ADD     A20,A29,A29             ; h4+carry
278
279         SHRU    A29,2,A2                ; check for overflow in 130-th bit
280
; If h+5 overflowed 2^130, h was >= 2^130-5: keep the reduced limbs.
281    [A2] MV      A22,A16                 ; select
282 || [A2] MV      A24,A17
283    [A2] MV      A26,A18
284 || [A2] MV      A28,A19
285
; Accumulate the nonce modulo 2^128 and byte-swap each word for output
; (SWAP2/SWAP4 are plain moves on little-endian cores).
286 ||      ADDU    A8,A16,A23:A22          ; accumulate nonce
287         ADDU    B8,A17,A25:A24
288 ||      SWAP2   A22,A22
289         ADDU    A23,A25:A24,A25:A24
290         ADDU    A9,A18,A27:A26
291 ||      SWAP2   A24,A24
292         ADDU    A25,A27:A26,A27:A26
293 ||      ADD     B9,A19,A28
294         ADD     A27,A28,A28
295 ||      SWAP2   A26,A26
296
297         .if     .BIG_ENDIAN
298         SWAP2   A28,A28
299 ||      SWAP4   A22,A22
300 ||      SWAP4   A24,B24
301         SWAP4   A26,A26
302         SWAP4   A28,A28
303 ||      MV      B24,A24
304         .endif
305
306         BNOP    RA,1
307         STNW    A22,*${MAC}[0]          ; write the result
308         STNW    A24,*${MAC}[1]
309         STNW    A26,*${MAC}[2]
310         STNW    A28,*${MAC}[3]
311         .endasmfunc
312 ___
313 }
# Trailing constant section with the CRYPTOGAMS attribution string.
314 $code.=<<___;
315         .sect   .const
316         .cstring "Poly1305 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
317         .align  4
318 ___
319
# Emit the accumulated assembly on stdout for the build system to capture.
320 print $code;