/*
 * Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2014 Cryptography Research, Inc.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 *
 * Originally written by Mike Hamburg
 */

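#include "field.h"

/*
 * Field elements are eight 56-bit limbs (radix 2^56, hence mask = 2^56 - 1)
 * representing values mod the Goldilocks prime p = 2^448 - 2^224 - 1.
 * With phi = 2^224, p = phi^2 - phi - 1, so phi^2 = phi + 1 (mod p); the
 * aa/bb "sum of halves" terms below exploit this identity for a
 * Karatsuba-style multiply.  widemul() is assumed (via "field.h") to be the
 * full 64x64 -> 128-bit product.
 */
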
void gf_mul(gf_s * __restrict__ cs, const gf as, const gf bs)
{
    const uint64_t *a = as->limb, *b = bs->limb;
    uint64_t *c = cs->limb;
    __uint128_t accum0 = 0, accum1 = 0, accum2;
    uint64_t mask = (1ull << 56) - 1;
    uint64_t aa[4], bb[4], bbb[4];
    unsigned int i, j;

    /* aa = a_lo + a_hi and bb = b_lo + b_hi are the Karatsuba sums of
     * halves; bbb = bb + b_hi is used for the columns that wrap past the
     * top limb. */
    for (i = 0; i < 4; i++) {
        aa[i] = a[i] + a[i + 4];
        bb[i] = b[i] + b[i + 4];
        bbb[i] = bb[i] + b[i + 4];
    }

    int I_HATE_UNROLLED_LOOPS = 0;

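    /*
     * Zero selects the hand-unrolled schedule in the else branch below;
     * the generic loop in the if branch is kept as a readable reference
     * and is dead code that any optimizing compiler should discard.
     */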
    if (I_HATE_UNROLLED_LOOPS) {
        /*
         * The compiler probably won't unroll this, so it's like 80% slower.
         */
        for (i = 0; i < 4; i++) {
            accum2 = 0;

            for (j = 0; j <= i; j++) {
                accum2 += widemul(a[j], b[i - j]);
                accum1 += widemul(aa[j], bb[i - j]);
                accum0 += widemul(a[j + 4], b[i - j + 4]);
            }
            for (; j < 4; j++) {
                accum2 += widemul(a[j], b[i - j + 8]);
                accum1 += widemul(aa[j], bbb[i - j + 4]);
                accum0 += widemul(a[j + 4], bb[i - j + 4]);
            }

            accum1 -= accum2;
            accum0 += accum2;

            c[i] = ((uint64_t)(accum0)) & mask;
            c[i + 4] = ((uint64_t)(accum1)) & mask;

            accum0 >>= 56;
            accum1 >>= 56;
        }
    } else {
        /*
         * Unrolled schedule: each block below is one output column; it
         * produces the low-half limb c[i] (accum0) and the high-half limb
         * c[i + 4] (accum1), then carries into the next column.
         */
        accum2 = widemul(a[0], b[0]);
        accum1 += widemul(aa[0], bb[0]);
        accum0 += widemul(a[4], b[4]);

        accum2 += widemul(a[1], b[7]);
        accum1 += widemul(aa[1], bbb[3]);
        accum0 += widemul(a[5], bb[3]);

        accum2 += widemul(a[2], b[6]);
        accum1 += widemul(aa[2], bbb[2]);
        accum0 += widemul(a[6], bb[2]);

        accum2 += widemul(a[3], b[5]);
        accum1 += widemul(aa[3], bbb[1]);
        accum0 += widemul(a[7], bb[1]);

        /* Karatsuba combine: the a_lo*b_lo column (accum2) is subtracted
         * from the aa*bb sum and added to the low half. */
        accum1 -= accum2;
        accum0 += accum2;

        c[0] = ((uint64_t)(accum0)) & mask;
        c[4] = ((uint64_t)(accum1)) & mask;

        accum0 >>= 56;
        accum1 >>= 56;

        accum2 = widemul(a[0], b[1]);
        accum1 += widemul(aa[0], bb[1]);
        accum0 += widemul(a[4], b[5]);

        accum2 += widemul(a[1], b[0]);
        accum1 += widemul(aa[1], bb[0]);
        accum0 += widemul(a[5], b[4]);

        accum2 += widemul(a[2], b[7]);
        accum1 += widemul(aa[2], bbb[3]);
        accum0 += widemul(a[6], bb[3]);

        accum2 += widemul(a[3], b[6]);
        accum1 += widemul(aa[3], bbb[2]);
        accum0 += widemul(a[7], bb[2]);

        accum1 -= accum2;
        accum0 += accum2;

        c[1] = ((uint64_t)(accum0)) & mask;
        c[5] = ((uint64_t)(accum1)) & mask;

        accum0 >>= 56;
        accum1 >>= 56;

        accum2 = widemul(a[0], b[2]);
        accum1 += widemul(aa[0], bb[2]);
        accum0 += widemul(a[4], b[6]);

        accum2 += widemul(a[1], b[1]);
        accum1 += widemul(aa[1], bb[1]);
        accum0 += widemul(a[5], b[5]);

        accum2 += widemul(a[2], b[0]);
        accum1 += widemul(aa[2], bb[0]);
        accum0 += widemul(a[6], b[4]);

        accum2 += widemul(a[3], b[7]);
        accum1 += widemul(aa[3], bbb[3]);
        accum0 += widemul(a[7], bb[3]);

        accum1 -= accum2;
        accum0 += accum2;

        c[2] = ((uint64_t)(accum0)) & mask;
        c[6] = ((uint64_t)(accum1)) & mask;

        accum0 >>= 56;
        accum1 >>= 56;

        accum2 = widemul(a[0], b[3]);
        accum1 += widemul(aa[0], bb[3]);
        accum0 += widemul(a[4], b[7]);

        accum2 += widemul(a[1], b[2]);
        accum1 += widemul(aa[1], bb[2]);
        accum0 += widemul(a[5], b[6]);

        accum2 += widemul(a[2], b[1]);
        accum1 += widemul(aa[2], bb[1]);
        accum0 += widemul(a[6], b[5]);

        accum2 += widemul(a[3], b[0]);
        accum1 += widemul(aa[3], bb[0]);
        accum0 += widemul(a[7], b[4]);

        accum1 -= accum2;
        accum0 += accum2;

        c[3] = ((uint64_t)(accum0)) & mask;
        c[7] = ((uint64_t)(accum1)) & mask;

        accum0 >>= 56;
        accum1 >>= 56;
    }                           /* !I_HATE_UNROLLED_LOOPS */

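    /*
     * Final reduction: with p = 2^448 - 2^224 - 1, 2^448 = 2^224 + 1 (mod p),
     * so the carry out of limb 7 (accum1) is folded into both limb 0 and
     * limb 4, the carry out of limb 3 (accum0) into limb 4, and the last
     * carries ripple into limbs 1 and 5.
     */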
    accum0 += accum1;
    accum0 += c[4];
    accum1 += c[0];
    c[4] = ((uint64_t)(accum0)) & mask;
    c[0] = ((uint64_t)(accum1)) & mask;

    accum0 >>= 56;
    accum1 >>= 56;

    c[5] += ((uint64_t)(accum0));
    c[1] += ((uint64_t)(accum1));
}

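/*
 * Multiply a field element by a 32-bit unsigned word, limb by limb,
 * propagating carries on the fly and folding the top carries back into
 * limbs 0 and 4 as in gf_mul().
 */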
void gf_mulw_unsigned(gf_s * __restrict__ cs, const gf as, uint32_t b)
{
    const uint64_t *a = as->limb;
    uint64_t *c = cs->limb;
    __uint128_t accum0 = 0, accum4 = 0;
    uint64_t mask = (1ull << 56) - 1;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        accum0 += widemul(b, a[i]);
        accum4 += widemul(b, a[i + 4]);
        c[i] = accum0 & mask;
        accum0 >>= 56;
        c[i + 4] = accum4 & mask;
        accum4 >>= 56;
    }

    accum0 += accum4 + c[4];
    c[4] = accum0 & mask;
    c[5] += accum0 >> 56;

    accum4 += c[0];
    c[0] = accum4 & mask;
    c[1] += accum4 >> 56;
}

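/*
 * Dedicated squaring: the same column schedule as gf_mul() with both inputs
 * equal, so each cross product is computed once and doubled rather than
 * computed twice.
 */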
void gf_sqr(gf_s * __restrict__ cs, const gf as)
{
    const uint64_t *a = as->limb;
    uint64_t *c = cs->limb;
    __uint128_t accum0 = 0, accum1 = 0, accum2;
    uint64_t mask = (1ull << 56) - 1;
    uint64_t aa[4];
    unsigned int i;

    /* For some reason clang doesn't vectorize this without prompting? */
    for (i = 0; i < 4; i++) {
        aa[i] = a[i] + a[i + 4];
    }

    accum2 = widemul(a[0], a[3]);
    accum0 = widemul(aa[0], aa[3]);
    accum1 = widemul(a[4], a[7]);

    accum2 += widemul(a[1], a[2]);
    accum0 += widemul(aa[1], aa[2]);
    accum1 += widemul(a[5], a[6]);

    accum0 -= accum2;
    accum1 += accum2;

    /* These columns hold only cross terms, so the whole sum is doubled;
     * the matching carry of the doubled value is accum >> 55. */
    c[3] = ((uint64_t)(accum1)) << 1 & mask;
    c[7] = ((uint64_t)(accum0)) << 1 & mask;

    accum0 >>= 55;
    accum1 >>= 55;

    accum0 += widemul(2 * aa[1], aa[3]);
    accum1 += widemul(2 * a[5], a[7]);
    accum0 += widemul(aa[2], aa[2]);
    accum1 += accum0;

    accum0 -= widemul(2 * a[1], a[3]);
    accum1 += widemul(a[6], a[6]);

    accum2 = widemul(a[0], a[0]);
    accum1 -= accum2;
    accum0 += accum2;

    accum0 -= widemul(a[2], a[2]);
    accum1 += widemul(aa[0], aa[0]);
    accum0 += widemul(a[4], a[4]);

    c[0] = ((uint64_t)(accum0)) & mask;
    c[4] = ((uint64_t)(accum1)) & mask;

    accum0 >>= 56;
    accum1 >>= 56;

    accum2 = widemul(2 * aa[2], aa[3]);
    accum0 -= widemul(2 * a[2], a[3]);
    accum1 += widemul(2 * a[6], a[7]);

    accum1 += accum2;
    accum0 += accum2;

    accum2 = widemul(2 * a[0], a[1]);
    accum1 += widemul(2 * aa[0], aa[1]);
    accum0 += widemul(2 * a[4], a[5]);

    accum1 -= accum2;
    accum0 += accum2;

    c[1] = ((uint64_t)(accum0)) & mask;
    c[5] = ((uint64_t)(accum1)) & mask;

    accum0 >>= 56;
    accum1 >>= 56;

    accum2 = widemul(aa[3], aa[3]);
    accum0 -= widemul(a[3], a[3]);
    accum1 += widemul(a[7], a[7]);

    accum1 += accum2;
    accum0 += accum2;

    accum2 = widemul(2 * a[0], a[2]);
    accum1 += widemul(2 * aa[0], aa[2]);
    accum0 += widemul(2 * a[4], a[6]);

    accum2 += widemul(a[1], a[1]);
    accum1 += widemul(aa[1], aa[1]);
    accum0 += widemul(a[5], a[5]);

    accum1 -= accum2;
    accum0 += accum2;

    c[2] = ((uint64_t)(accum0)) & mask;
    c[6] = ((uint64_t)(accum1)) & mask;

    accum0 >>= 56;
    accum1 >>= 56;

    accum0 += c[3];
    accum1 += c[7];
    c[3] = ((uint64_t)(accum0)) & mask;
    c[7] = ((uint64_t)(accum1)) & mask;

    /* we could almost stop here, but it wouldn't be stable, so... */

    accum0 >>= 56;
    accum1 >>= 56;
    c[4] += ((uint64_t)(accum0)) + ((uint64_t)(accum1));
    c[0] += ((uint64_t)(accum1));
}