2 * Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
3 * Copyright 2014 Cryptography Research, Inc.
5 * Licensed under the OpenSSL license (the "License"). You may not use
6 * this file except in compliance with the License. You can obtain a copy
7 * in the file LICENSE in the source distribution or at
8 * https://www.openssl.org/source/license.html
10 * Originally written by Mike Hamburg
/*
 * gf_mul: field multiplication, cs = as * bs.
 *
 * Elements are stored as eight 64-bit limbs in radix 2^56 ("mask" below
 * keeps the low 56 bits of each accumulator).  56x56-bit partial products
 * from widemul() are summed in 128-bit accumulators so that many terms can
 * be added before any carry propagation is needed.  aa[i] = a[i] + a[i+4]
 * (and bb likewise) fold the high half of each operand onto the low half,
 * Karatsuba-style; bbb[i] = bb[i] + b[i+4] adds the high limb once more.
 * NOTE(review): presumably the modulus is the Ed448 "Goldilocks" prime
 * 2^448 - 2^224 - 1, whose shape this folding exploits -- the prime itself
 * is not visible here; confirm against the header declaring gf/widemul.
 * NOTE(review): the embedded original line numbers jump (e.g. 155->159->164),
 * so intermediate carry statements between the masked stores are not shown
 * in this excerpt -- do not assume the visible statements are contiguous.
 */
14 void gf_mul(gf_s * __restrict__ cs, const gf as, const gf bs)
16 const uint64_t *a = as->limb, *b = bs->limb;
17 uint64_t *c = cs->limb;
/* 128-bit running sums; accum2 collects the column that is later folded
 * into the other two accumulators. */
18 __uint128_t accum0 = 0, accum1 = 0, accum2;
/* Radix mask: each limb carries 56 significant bits. */
19 uint64_t mask = (1ull << 56) - 1;
20 uint64_t aa[4], bb[4], bbb[4];
/* Precompute the folded operands described in the function comment. */
23 for (i = 0; i < 4; i++) {
24 aa[i] = a[i] + a[i + 4];
25 bb[i] = b[i] + b[i + 4];
26 bbb[i] = bb[i] + b[i + 4];
/* Runtime switch between the compact convolution loop and the manually
 * unrolled body below; both are intended to compute the same result. */
29 int I_HATE_UNROLLED_LOOPS = 0;
31 if (I_HATE_UNROLLED_LOOPS) {
33 * The compiler probably won't unroll this, so it's like 80% slower.
/* Generic convolution for output limb pair (c[i], c[i+4]): terms with
 * j <= i use b directly; the wrapped-around terms (j > i) go through
 * b[...+8] / bbb / bb, i.e. the folded high half. */
35 for (i = 0; i < 4; i++) {
39 for (j = 0; j <= i; j++) {
40 accum2 += widemul(a[j], b[i - j]);
41 accum1 += widemul(aa[j], bb[i - j]);
42 accum0 += widemul(a[j + 4], b[i - j + 4]);
45 accum2 += widemul(a[j], b[i - j + 8]);
46 accum1 += widemul(aa[j], bbb[i - j + 4]);
47 accum0 += widemul(a[j + 4], bb[i - j + 4]);
/* Store the low 56 bits of each accumulator as this iteration's limbs. */
53 c[i] = ((uint64_t)(accum0)) & mask;
54 c[i + 4] = ((uint64_t)(accum1)) & mask;
/* Unrolled version: one group of widemul() terms per output limb pair,
 * followed by a masked store of c[k] / c[k+4].  Statement order is
 * load-bearing -- the accumulators carry running values across groups. */
60 accum2 = widemul(a[0], b[0]);
61 accum1 += widemul(aa[0], bb[0]);
62 accum0 += widemul(a[4], b[4]);
64 accum2 += widemul(a[1], b[7]);
65 accum1 += widemul(aa[1], bbb[3]);
66 accum0 += widemul(a[5], bb[3]);
68 accum2 += widemul(a[2], b[6]);
69 accum1 += widemul(aa[2], bbb[2]);
70 accum0 += widemul(a[6], bb[2]);
72 accum2 += widemul(a[3], b[5]);
73 accum1 += widemul(aa[3], bbb[1]);
74 accum0 += widemul(a[7], bb[1]);
/* Limb pair 0/4. */
79 c[0] = ((uint64_t)(accum0)) & mask;
80 c[4] = ((uint64_t)(accum1)) & mask;
85 accum2 = widemul(a[0], b[1]);
86 accum1 += widemul(aa[0], bb[1]);
87 accum0 += widemul(a[4], b[5]);
89 accum2 += widemul(a[1], b[0]);
90 accum1 += widemul(aa[1], bb[0]);
91 accum0 += widemul(a[5], b[4]);
93 accum2 += widemul(a[2], b[7]);
94 accum1 += widemul(aa[2], bbb[3]);
95 accum0 += widemul(a[6], bb[3]);
97 accum2 += widemul(a[3], b[6]);
98 accum1 += widemul(aa[3], bbb[2]);
99 accum0 += widemul(a[7], bb[2]);
/* Limb pair 1/5. */
104 c[1] = ((uint64_t)(accum0)) & mask;
105 c[5] = ((uint64_t)(accum1)) & mask;
110 accum2 = widemul(a[0], b[2]);
111 accum1 += widemul(aa[0], bb[2]);
112 accum0 += widemul(a[4], b[6]);
114 accum2 += widemul(a[1], b[1]);
115 accum1 += widemul(aa[1], bb[1]);
116 accum0 += widemul(a[5], b[5]);
118 accum2 += widemul(a[2], b[0]);
119 accum1 += widemul(aa[2], bb[0]);
120 accum0 += widemul(a[6], b[4]);
122 accum2 += widemul(a[3], b[7]);
123 accum1 += widemul(aa[3], bbb[3]);
124 accum0 += widemul(a[7], bb[3]);
/* Limb pair 2/6. */
129 c[2] = ((uint64_t)(accum0)) & mask;
130 c[6] = ((uint64_t)(accum1)) & mask;
135 accum2 = widemul(a[0], b[3]);
136 accum1 += widemul(aa[0], bb[3]);
137 accum0 += widemul(a[4], b[7]);
139 accum2 += widemul(a[1], b[2]);
140 accum1 += widemul(aa[1], bb[2]);
141 accum0 += widemul(a[5], b[6]);
143 accum2 += widemul(a[2], b[1]);
144 accum1 += widemul(aa[2], bb[1]);
145 accum0 += widemul(a[6], b[5]);
147 accum2 += widemul(a[3], b[0]);
148 accum1 += widemul(aa[3], bb[0]);
149 accum0 += widemul(a[7], b[4]);
/* Limb pair 3/7. */
154 c[3] = ((uint64_t)(accum0)) & mask;
155 c[7] = ((uint64_t)(accum1)) & mask;
159 } /* !I_HATE_UNROLLED_LOOPS */
/* Final carry fold: the residual accumulator bits are wrapped back into
 * limbs 0/4 (and their carries into limbs 1/5), keeping the result in
 * reduced radix-2^56 form. */
164 c[4] = ((uint64_t)(accum0)) & mask;
165 c[0] = ((uint64_t)(accum1)) & mask;
170 c[5] += ((uint64_t)(accum0));
171 c[1] += ((uint64_t)(accum1));
/*
 * gf_mulw_unsigned: multiply a field element by a small scalar,
 * cs = as * b, with b an unsigned 32-bit word.
 *
 * Two 128-bit accumulators walk the low half (a[0..3]) and high half
 * (a[4..7]) of the element in parallel; each iteration stores one masked
 * 56-bit limb per half, with the accumulator retaining the carry for the
 * next limb.  NOTE(review): the original line numbers jump (185->187,
 * 187->191, 193->196), so carry-shift statements between these lines are
 * not visible in this excerpt.
 */
174 void gf_mulw_unsigned(gf_s * __restrict__ cs, const gf as, uint32_t b)
176 const uint64_t *a = as->limb;
177 uint64_t *c = cs->limb;
/* accum0 tracks the low half, accum4 the high half. */
178 __uint128_t accum0 = 0, accum4 = 0;
/* Radix mask: each limb carries 56 significant bits. */
179 uint64_t mask = (1ull << 56) - 1;
182 for (i = 0; i < 4; i++) {
183 accum0 += widemul(b, a[i]);
184 accum4 += widemul(b, a[i + 4]);
185 c[i] = accum0 & mask;
187 c[i + 4] = accum4 & mask;
/* Fold the final carries.  The high half's carry is added into limb 4
 * together with the low half's carry, and (below) into limb 0 as well --
 * NOTE(review): consistent with reduction modulo 2^448 - 2^224 - 1, where
 * a carry out of the top wraps into limbs 0 and 4; confirm. */
191 accum0 += accum4 + c[4];
192 c[4] = accum0 & mask;
193 c[5] += accum0 >> 56;
196 c[0] = accum4 & mask;
197 c[1] += accum4 >> 56;
/*
 * gf_sqr: field squaring, cs = as^2.
 *
 * Specialised form of gf_mul for squaring: symmetric cross terms are
 * computed once and doubled (the "2 *" factors and "<< 1" shifts), and
 * some square terms are subtracted back out of the folded aa[] products
 * (the "accum0 -=" lines) rather than recomputed.  Same radix-2^56 limb
 * representation and 128-bit accumulators as gf_mul.
 * NOTE(review): declarations (e.g. aa[]) and the carry-shift statements
 * between groups are elided from this excerpt -- the original line
 * numbers jump throughout; do not treat the visible statements as
 * contiguous code.
 */
200 void gf_sqr(gf_s * __restrict__ cs, const gf as)
202 const uint64_t *a = as->limb;
203 uint64_t *c = cs->limb;
204 __uint128_t accum0 = 0, accum1 = 0, accum2;
/* Radix mask: each limb carries 56 significant bits. */
205 uint64_t mask = (1ull << 56) - 1;
208 /* For some reason clang doesn't vectorize this without prompting? */
/* Fold the high half onto the low half, as in gf_mul. */
210 for (i = 0; i < 4; i++) {
211 aa[i] = a[i] + a[i + 4];
/* Odd cross terms for the top limb pair; doubled on store below. */
214 accum2 = widemul(a[0], a[3]);
215 accum0 = widemul(aa[0], aa[3]);
216 accum1 = widemul(a[4], a[7]);
218 accum2 += widemul(a[1], a[2]);
219 accum0 += widemul(aa[1], aa[2]);
220 accum1 += widemul(a[5], a[6]);
/* "<< 1" doubles the symmetric cross products (x*y appears twice in a
 * square) before masking to 56 bits. */
225 c[3] = ((uint64_t)(accum1)) << 1 & mask;
226 c[7] = ((uint64_t)(accum0)) << 1 & mask;
231 accum0 += widemul(2 * aa[1], aa[3]);
232 accum1 += widemul(2 * a[5], a[7]);
233 accum0 += widemul(aa[2], aa[2]);
/* Subtract the plain-a cross term: aa[] products double-count it. */
236 accum0 -= widemul(2 * a[1], a[3]);
237 accum1 += widemul(a[6], a[6]);
239 accum2 = widemul(a[0], a[0]);
243 accum0 -= widemul(a[2], a[2]);
244 accum1 += widemul(aa[0], aa[0]);
245 accum0 += widemul(a[4], a[4]);
/* Limb pair 0/4. */
247 c[0] = ((uint64_t)(accum0)) & mask;
248 c[4] = ((uint64_t)(accum1)) & mask;
253 accum2 = widemul(2 * aa[2], aa[3]);
254 accum0 -= widemul(2 * a[2], a[3]);
255 accum1 += widemul(2 * a[6], a[7]);
260 accum2 = widemul(2 * a[0], a[1]);
261 accum1 += widemul(2 * aa[0], aa[1]);
262 accum0 += widemul(2 * a[4], a[5]);
/* Limb pair 1/5. */
267 c[1] = ((uint64_t)(accum0)) & mask;
268 c[5] = ((uint64_t)(accum1)) & mask;
273 accum2 = widemul(aa[3], aa[3]);
274 accum0 -= widemul(a[3], a[3]);
275 accum1 += widemul(a[7], a[7]);
280 accum2 = widemul(2 * a[0], a[2]);
281 accum1 += widemul(2 * aa[0], aa[2]);
282 accum0 += widemul(2 * a[4], a[6]);
284 accum2 += widemul(a[1], a[1]);
285 accum1 += widemul(aa[1], aa[1]);
286 accum0 += widemul(a[5], a[5]);
/* Limb pair 2/6. */
291 c[2] = ((uint64_t)(accum0)) & mask;
292 c[6] = ((uint64_t)(accum1)) & mask;
/* Second store into limbs 3/7: adds the carried accumulator bits on top
 * of the doubled cross terms stored earlier. */
299 c[3] = ((uint64_t)(accum0)) & mask;
300 c[7] = ((uint64_t)(accum1)) & mask;
302 /* we could almost stop here, but it wouldn't be stable, so... */
/* Final carry fold into limbs 0/4 to keep the representation stable
 * (bounded limb magnitudes) for subsequent operations. */
306 c[4] += ((uint64_t)(accum0)) + ((uint64_t)(accum1));
307 c[0] += ((uint64_t)(accum1));