/* ====================================================================
 * Copyright (c) 2011-2013 The OpenSSL Project. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 */
/* This implementation of poly1305 is by Andrew Moon
 * (https://github.com/floodyberry/poly1305-donna) and released as public
 * domain. It implements SIMD vectorization based on the algorithm described in
 * http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e. 64 byte
 * block size. */
#include <emmintrin.h>

#include <openssl/opensslconf.h>

#if !defined(OPENSSL_NO_POLY1305)

#include <openssl/poly1305.h>
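/* A minimal usage sketch of the incremental MAC interface implemented below
 * (illustrative only; "msg" and "msg_len" stand for the caller's buffer, and a
 * Poly1305 key must never be reused for a second message):
 *
 *     poly1305_state ctx;
 *     unsigned char key[32];  // r (clamped in init) || s, 16 bytes each
 *     unsigned char tag[16];
 *
 *     CRYPTO_poly1305_init(&ctx, key);
 *     CRYPTO_poly1305_update(&ctx, msg, msg_len); // may be called repeatedly
 *     CRYPTO_poly1305_finish(&ctx, tag);
 */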
#define ALIGN(x) __attribute__((aligned(x)))

#define U8TO64_LE(m) (*(uint64_t*)(m))
#define U8TO32_LE(m) (*(uint32_t*)(m))
#define U64TO8_LE(m,v) (*(uint64_t*)(m)) = v
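/* These helpers read and write 64/32-bit words straight through byte pointers,
 * with no alignment or endianness handling; that is acceptable only because
 * this file is built solely for little-endian x86-64 (it depends on SSE2 and a
 * 128-bit integer type). */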
typedef unsigned __int128 uint128_t;

static const uint32_t ALIGN(16) poly1305_x64_sse2_message_mask[4] =
    {(1 << 26) - 1, 0, (1 << 26) - 1, 0};
static const uint32_t ALIGN(16) poly1305_x64_sse2_5[4] = {5, 0, 5, 0};
static const uint32_t ALIGN(16) poly1305_x64_sse2_1shl128[4] =
    {(1 << 24), 0, (1 << 24), 0};
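/* poly1305_x64_sse2_message_mask keeps the low 26 bits of each 64-bit lane
 * (the radix-2^26 limb mask), poly1305_x64_sse2_5 is the multiplier used to
 * fold values back modulo 2^130-5 (2^130 == 5 mod p), and
 * poly1305_x64_sse2_1shl128 is the 2^128 padding bit of a full 16-byte block,
 * expressed as 1<<24 inside the top limb, which covers bits 104..129. */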
static uint128_t INLINE
add128(uint128_t a, uint128_t b)

static uint128_t INLINE
add128_64(uint128_t a, uint64_t b)

static uint128_t INLINE
mul64x64_128(uint64_t a, uint64_t b)
return (uint128_t)a * b;

static uint64_t INLINE

static uint64_t INLINE
shr128(uint128_t v, const int shift)
return (uint64_t)(v >> shift);

static uint64_t INLINE
shr128_pair(uint64_t hi, uint64_t lo, const int shift)
return (uint64_t)((((uint128_t)hi << 64) | lo) >> shift);
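/* The wrappers above (add128, add128_64, mul64x64_128, lo128, shr128,
 * shr128_pair) are thin conveniences over unsigned __int128; the scalar parts
 * of this file keep h and r in 44/44/42-bit limbs and use them to hold the
 * double-width products during multiplication and carry propagation. */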
typedef struct poly1305_power_t
} R20,R21,R22,R23,R24,S21,S22,S23,S24;

typedef struct poly1305_state_internal_t
poly1305_power P[2]; /* 288 bytes, top 32 bit halves unused = 144 bytes of free storage */
xmmi H[5]; /* 80 bytes */
/* uint64_t r0,r1,r2; [24 bytes] */
/* uint64_t pad0,pad1; [16 bytes] */
uint64_t started; /* 8 bytes */
uint64_t leftover; /* 8 bytes */
uint8_t buffer[64]; /* 64 bytes */
} poly1305_state_internal; /* 448 bytes total + 63 bytes for alignment = 511 bytes raw */
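/* The public poly1305_state is an opaque, oversized byte buffer; the real
 * poly1305_state_internal is placed at the next 64-byte boundary inside it,
 * which keeps the xmmi members suitably aligned. */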
static poly1305_state_internal INLINE
*poly1305_aligned_state(poly1305_state *state)
return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63);

/* copy 0-63 bytes */
poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes)
size_t offset = src - dst;
_mm_storeu_si128((xmmi *)(dst + 0), _mm_loadu_si128((xmmi *)(dst + offset + 0)));
_mm_storeu_si128((xmmi *)(dst + 16), _mm_loadu_si128((xmmi *)(dst + offset + 16)));
_mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((xmmi *)(dst + offset)));
*(uint64_t *)dst = *(uint64_t *)(dst + offset);
*(uint32_t *)dst = *(uint32_t *)(dst + offset);
*(uint16_t *)dst = *(uint16_t *)(dst + offset);
*( uint8_t *)dst = *( uint8_t *)(dst + offset);

/* zero 0-15 bytes */
poly1305_block_zero(uint8_t *dst, size_t bytes)
if (bytes & 8) { *(uint64_t *)dst = 0; dst += 8; }
if (bytes & 4) { *(uint32_t *)dst = 0; dst += 4; }
if (bytes & 2) { *(uint16_t *)dst = 0; dst += 2; }
if (bytes & 1) { *( uint8_t *)dst = 0; }

poly1305_min(size_t a, size_t b)
return (a < b) ? a : b;

CRYPTO_poly1305_init(poly1305_state *state, const unsigned char key[32])
poly1305_state_internal *st = poly1305_aligned_state(state);

t0 = U8TO64_LE(key + 0);
t1 = U8TO64_LE(key + 8);
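/* clamp r as poly1305 requires (top four bits of key bytes 3,7,11,15 and
 * bottom two bits of bytes 4,8,12 cleared), expressed directly in the
 * 44+44+42-bit limb layout used by the scalar code */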
r0 = t0 & 0xffc0fffffff; t0 >>= 44; t0 |= t1 << 20;
r1 = t0 & 0xfffffc0ffff; t1 >>= 24;
r2 = t1 & 0x00ffffffc0f;

/* store r in unused space of st->P[1] */
p->R20.d[1] = (uint32_t)(r0 );
p->R20.d[3] = (uint32_t)(r0 >> 32);
p->R21.d[1] = (uint32_t)(r1 );
p->R21.d[3] = (uint32_t)(r1 >> 32);
p->R22.d[1] = (uint32_t)(r2 );
p->R22.d[3] = (uint32_t)(r2 >> 32);
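/* key bytes 16..31 are the pad s; park them in the same unused high halves
 * until CRYPTO_poly1305_finish needs them */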
p->R23.d[1] = U8TO32_LE(key + 16);
p->R23.d[3] = U8TO32_LE(key + 20);
p->R24.d[1] = U8TO32_LE(key + 24);
p->R24.d[3] = U8TO32_LE(key + 28);
st->H[0] = _mm_setzero_si128();
st->H[1] = _mm_setzero_si128();
st->H[2] = _mm_setzero_si128();
st->H[3] = _mm_setzero_si128();
st->H[4] = _mm_setzero_si128();

poly1305_first_block(poly1305_state_internal *st, const uint8_t *m)
_mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);
const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);
uint64_t r20,r21,r22,s22;

/* pull out stored info */
r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];

/* compute powers r^2,r^4 */
for (i = 0; i < 2; i++)
s22 = r22 * (5 << 2);

d[0] = add128(mul64x64_128(r20, r20), mul64x64_128(r21 * 2, s22));
d[1] = add128(mul64x64_128(r22, s22), mul64x64_128(r20 * 2, r21));
d[2] = add128(mul64x64_128(r21, r21), mul64x64_128(r22 * 2, r20));

r20 = lo128(d[0]) & 0xfffffffffff; c = shr128(d[0], 44);
d[1] = add128_64(d[1], c); r21 = lo128(d[1]) & 0xfffffffffff; c = shr128(d[1], 44);
d[2] = add128_64(d[2], c); r22 = lo128(d[2]) & 0x3ffffffffff; c = shr128(d[2], 42);
r20 += c * 5; c = (r20 >> 44); r20 = r20 & 0xfffffffffff;
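/* spread this power of r into five 26-bit limbs, broadcast each limb into the
 * low 32 bits of both 64-bit lanes (the elements _mm_mul_epu32 reads), and
 * precompute the 5*r terms used to fold high products back modulo 2^130-5 */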
p->R20.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)( r20 ) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
p->R21.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r20 >> 26) | (r21 << 18)) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
p->R22.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 8) ) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
p->R23.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 34) | (r22 << 10)) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
p->R24.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r22 >> 16) ) ), _MM_SHUFFLE(1,0,1,0));
p->S21.v = _mm_mul_epu32(p->R21.v, FIVE);
p->S22.v = _mm_mul_epu32(p->R22.v, FIVE);
p->S23.v = _mm_mul_epu32(p->R23.v, FIVE);
p->S24.v = _mm_mul_epu32(p->R24.v, FIVE);
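/* the two passes of this loop populate st->P[1] with the limbs of r^2 and
 * st->P[0] with those of r^4 */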
/* put saved info back */
p->R20.d[1] = (uint32_t)(r0 );
p->R20.d[3] = (uint32_t)(r0 >> 32);
p->R21.d[1] = (uint32_t)(r1 );
p->R21.d[3] = (uint32_t)(r1 >> 32);
p->R22.d[1] = (uint32_t)(r2 );
p->R22.d[3] = (uint32_t)(r2 >> 32);
p->R23.d[1] = (uint32_t)(pad0 );
p->R23.d[3] = (uint32_t)(pad0 >> 32);
p->R24.d[1] = (uint32_t)(pad1 );
p->R24.d[3] = (uint32_t)(pad1 >> 32);
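/* split the first two 16-byte blocks into five 26-bit limbs each (block 0 in
 * the low 64-bit lane, block 1 in the high lane) and set the 2^128 padding
 * bit via HIBIT; this becomes the initial accumulator H */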
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
st->H[0] = _mm_and_si128(MMASK, T5);
st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
st->H[2] = _mm_and_si128(MMASK, T5);
st->H[3] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
st->H[4] = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, size_t bytes)
const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);
const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);
xmmi T0,T1,T2,T3,T4,T5,T6;

T0 = _mm_mul_epu32(H0, p->R20.v);
T1 = _mm_mul_epu32(H0, p->R21.v);
T2 = _mm_mul_epu32(H0, p->R22.v);
T3 = _mm_mul_epu32(H0, p->R23.v);
T4 = _mm_mul_epu32(H0, p->R24.v);
T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

/* H += [Mx,My]*[r^2,r^2] */
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
M0 = _mm_and_si128(MMASK, T5);
M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
M2 = _mm_and_si128(MMASK, T5);
M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

T5 = _mm_mul_epu32(M0, p->R20.v); T6 = _mm_mul_epu32(M0, p->R21.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(M1, p->S24.v); T6 = _mm_mul_epu32(M1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(M2, p->S23.v); T6 = _mm_mul_epu32(M2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(M3, p->S22.v); T6 = _mm_mul_epu32(M3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(M4, p->S21.v); T6 = _mm_mul_epu32(M4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(M0, p->R22.v); T6 = _mm_mul_epu32(M0, p->R23.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(M1, p->R21.v); T6 = _mm_mul_epu32(M1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(M2, p->R20.v); T6 = _mm_mul_epu32(M2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(M3, p->S24.v); T6 = _mm_mul_epu32(M3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(M4, p->S23.v); T6 = _mm_mul_epu32(M4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(M0, p->R24.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(M1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(M2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(M3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(M4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 32)), _mm_loadl_epi64((xmmi *)(m + 48)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 40)), _mm_loadl_epi64((xmmi *)(m + 56)));
M0 = _mm_and_si128(MMASK, T5);
M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
M2 = _mm_and_si128(MMASK, T5);
M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

T0 = _mm_add_epi64(T0, M0);
T1 = _mm_add_epi64(T1, M1);
T2 = _mm_add_epi64(T2, M2);
T3 = _mm_add_epi64(T3, M3);
T4 = _mm_add_epi64(T4, M4);
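/* partial carry propagation: push each limb's overflow into the next limb,
 * multiplying the carry out of the top limb by 5 before folding it back into
 * limb 0 (2^130 == 5 mod p); limbs stay bounded without a full reduction */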
C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
C1 = _mm_srli_epi64(T3, 26); T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1);
/* H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) */

poly1305_combine(poly1305_state_internal *st, const uint8_t *m, size_t bytes)
_mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);
const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);
xmmi T0,T1,T2,T3,T4,T5,T6;
uint64_t t0,t1,t2,t3,t4;

T0 = _mm_mul_epu32(H0, p->R20.v);
T1 = _mm_mul_epu32(H0, p->R21.v);
T2 = _mm_mul_epu32(H0, p->R22.v);
T3 = _mm_mul_epu32(H0, p->R23.v);
T4 = _mm_mul_epu32(H0, p->R24.v);
T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
M0 = _mm_and_si128(MMASK, T5);
M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
M2 = _mm_and_si128(MMASK, T5);
M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

T0 = _mm_add_epi64(T0, M0);
T1 = _mm_add_epi64(T1, M1);
T2 = _mm_add_epi64(T2, M2);
T3 = _mm_add_epi64(T3, M3);
T4 = _mm_add_epi64(T4, M4);

C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
C1 = _mm_srli_epi64(T3, 26); T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1);

/* H = (H*[r^2,r^2] + [Mx,My]) */

/* finalize, H *= [r^2,r] */
r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
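/* patch the limbs of r itself into the d[2] words (the element _mm_mul_epu32
 * reads from the upper 64-bit lane), leaving r^2 in the lower lane, so the
 * next multiply advances the two interleaved accumulators by r^2 and r
 * respectively */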
p->R20.d[2] = (uint32_t)( r0 ) & 0x3ffffff;
p->R21.d[2] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff;
p->R22.d[2] = (uint32_t)((r1 >> 8) ) & 0x3ffffff;
p->R23.d[2] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff;
p->R24.d[2] = (uint32_t)((r2 >> 16) );
p->S21.d[2] = p->R21.d[2] * 5;
p->S22.d[2] = p->R22.d[2] * 5;
p->S23.d[2] = p->R23.d[2] * 5;
p->S24.d[2] = p->R24.d[2] * 5;
T0 = _mm_mul_epu32(H0, p->R20.v);
T1 = _mm_mul_epu32(H0, p->R21.v);
T2 = _mm_mul_epu32(H0, p->R22.v);
T3 = _mm_mul_epu32(H0, p->R23.v);
T4 = _mm_mul_epu32(H0, p->R24.v);
T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
C1 = _mm_srli_epi64(T3, 26); T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1);
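/* fold the two 64-bit lanes of each limb together, summing the two
 * interleaved accumulators into a single poly1305 value */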
H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8));
H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8));
H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8));
H3 = _mm_add_epi64(T3, _mm_srli_si128(T3, 8));
H4 = _mm_add_epi64(T4, _mm_srli_si128(T4, 8));
t0 = _mm_cvtsi128_si32(H0) ; c = (t0 >> 26); t0 &= 0x3ffffff;
t1 = _mm_cvtsi128_si32(H1) + c; c = (t1 >> 26); t1 &= 0x3ffffff;
t2 = _mm_cvtsi128_si32(H2) + c; c = (t2 >> 26); t2 &= 0x3ffffff;
t3 = _mm_cvtsi128_si32(H3) + c; c = (t3 >> 26); t3 &= 0x3ffffff;
t4 = _mm_cvtsi128_si32(H4) + c; c = (t4 >> 26); t4 &= 0x3ffffff;
t0 = t0 + (c * 5); c = (t0 >> 26); t0 &= 0x3ffffff;
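/* repack the five 26-bit limbs into the 44+44+42-bit limbs (HH[0..2]) that
 * the scalar finishing code in CRYPTO_poly1305_finish works with */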
st->HH[0] = ((t0 ) | (t1 << 26) ) & 0xfffffffffffull;
st->HH[1] = ((t1 >> 18) | (t2 << 8) | (t3 << 34)) & 0xfffffffffffull;
st->HH[2] = ((t3 >> 10) | (t4 << 16) ) & 0x3ffffffffffull;
CRYPTO_poly1305_update(poly1305_state *state, const unsigned char *m,
poly1305_state_internal *st = poly1305_aligned_state(state);

/* need at least 32 initial bytes to start the accelerated branch */
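/* (the SSE2 path works on two 16-byte blocks at a time: the first 32 bytes
 * seed H, later input is consumed in 64-byte chunks, and any remainder stays
 * buffered until finish) */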
if ((st->leftover == 0) && (bytes > 32))
poly1305_first_block(st, m);

want = poly1305_min(32 - st->leftover, bytes);
poly1305_block_copy(st->buffer + st->leftover, m, want);
st->leftover += want;
if ((st->leftover < 32) || (bytes == 0))
poly1305_first_block(st, st->buffer);

/* handle leftover */
want = poly1305_min(64 - st->leftover, bytes);
poly1305_block_copy(st->buffer + st->leftover, m, want);
st->leftover += want;
if (st->leftover < 64)
poly1305_blocks(st, st->buffer, 64);

/* process 64 byte blocks */
want = (bytes & ~63);
poly1305_blocks(st, m, want);

poly1305_block_copy(st->buffer + st->leftover, m, bytes);
st->leftover += bytes;
CRYPTO_poly1305_finish(poly1305_state *state, unsigned char mac[16])
poly1305_state_internal *st = poly1305_aligned_state(state);
size_t leftover = st->leftover;
uint8_t *m = st->buffer;
uint64_t g0,g1,g2,c,nc;
uint64_t r0,r1,r2,s1,s2;

size_t consumed = poly1305_combine(st, m, leftover);
leftover -= consumed;

/* st->HH will either be 0 or have the combined result */
r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];

goto poly1305_donna_atmost15bytes;

poly1305_donna_atleast16bytes:
t0 = U8TO64_LE(m + 0);
t1 = U8TO64_LE(m + 8);
h0 += t0 & 0xfffffffffff;
t0 = shr128_pair(t1, t0, 44);
h1 += t0 & 0xfffffffffff;
h2 += (t1 >> 24) | ((uint64_t)1 << 40);

d[0] = add128(add128(mul64x64_128(h0, r0), mul64x64_128(h1, s2)), mul64x64_128(h2, s1));
d[1] = add128(add128(mul64x64_128(h0, r1), mul64x64_128(h1, r0)), mul64x64_128(h2, s2));
d[2] = add128(add128(mul64x64_128(h0, r2), mul64x64_128(h1, r1)), mul64x64_128(h2, r0));
h0 = lo128(d[0]) & 0xfffffffffff; c = shr128(d[0], 44);
d[1] = add128_64(d[1], c); h1 = lo128(d[1]) & 0xfffffffffff; c = shr128(d[1], 44);
d[2] = add128_64(d[2], c); h2 = lo128(d[2]) & 0x3ffffffffff; c = shr128(d[2], 42);

if (leftover >= 16) goto poly1305_donna_atleast16bytes;

poly1305_donna_atmost15bytes:
if (!leftover) goto poly1305_donna_finish;

poly1305_block_zero(m + leftover, 16 - leftover);

h0 += t0 & 0xfffffffffff; t0 = shr128_pair(t1, t0, 44);
h1 += t0 & 0xfffffffffff;

goto poly1305_donna_mul;

poly1305_donna_finish:
c = (h0 >> 44); h0 &= 0xfffffffffff;
h1 += c; c = (h1 >> 44); h1 &= 0xfffffffffff;
h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff;

g0 = h0 + 5; c = (g0 >> 44); g0 &= 0xfffffffffff;
g1 = h1 + c; c = (g1 >> 44); g1 &= 0xfffffffffff;
g2 = h2 + c - ((uint64_t)1 << 42);

h0 = (h0 & nc) | (g0 & c);
h1 = (h1 & nc) | (g1 & c);
h2 = (h2 & nc) | (g2 & c);
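/* the mask select above picks, without branching, either h or h - (2^130-5),
 * i.e. the fully reduced accumulator; all that remains is to add the pad s
 * (recovered from st->P[1]) and emit the low 128 bits little-endian */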
t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];
h0 += (t0 & 0xfffffffffff) ; c = (h0 >> 44); h0 &= 0xfffffffffff; t0 = shr128_pair(t1, t0, 44);
h1 += (t0 & 0xfffffffffff) + c; c = (h1 >> 44); h1 &= 0xfffffffffff; t1 = (t1 >> 24);

U64TO8_LE(mac + 0, ((h0 ) | (h1 << 44)));
U64TO8_LE(mac + 8, ((h1 >> 20) | (h2 << 24)));

#endif /* !OPENSSL_NO_POLY1305 */