1 /******************************************************************************
3 * Copyright 2014 Intel Corporation *
5 * Licensed under the Apache License, Version 2.0 (the "License"); *
6 * you may not use this file except in compliance with the License. *
7 * You may obtain a copy of the License at *
9 * http://www.apache.org/licenses/LICENSE-2.0 *
11 * Unless required by applicable law or agreed to in writing, software *
12 * distributed under the License is distributed on an "AS IS" BASIS, *
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
14 * See the License for the specific language governing permissions and *
15 * limitations under the License. *
17 ******************************************************************************
19 * Developers and authors: *
20 * Shay Gueron (1, 2), and Vlad Krasnov (1) *
21 * (1) Intel Corporation, Israel Development Center *
22 * (2) University of Haifa *
 * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with *
 * 256 Bit Primes"                                                            *
27 ******************************************************************************/
31 #include <openssl/bn.h>
32 #include <openssl/err.h>
#include <openssl/ec.h>
#include "ec_lcl.h"
#if BN_BITS2 != 64
# define TOBN(hi,lo)    lo,hi
#else
# define TOBN(hi,lo)    ((BN_ULONG)hi<<32|lo)
#endif
#if defined(__GNUC__)
# define ALIGN32        __attribute((aligned(32)))
#elif defined(_MSC_VER)
# define ALIGN32        __declspec(align(32))
#else
# define ALIGN32
#endif
52 #define ALIGNPTR(p,N) ((unsigned char *)p+N-(size_t)p%N)
53 #define P256_LIMBS (256/BN_BITS2)
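/*
 * ALIGNPTR rounds |p| up to an N-byte boundary; it always advances by 1..N
 * bytes, which is why the table allocations below over-allocate by 64 bytes
 * before aligning. P256_LIMBS is 4 on 64-bit builds (BN_BITS2 == 64) and 8
 * on 32-bit builds.
 */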
55 typedef unsigned short u16;
typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
    BN_ULONG Z[P256_LIMBS];
} P256_POINT;

typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
} P256_POINT_AFFINE;
68 typedef P256_POINT_AFFINE PRECOMP256_ROW[64];
70 /* structure for precomputed multiples of the generator */
71 typedef struct ec_pre_comp_st {
72 const EC_GROUP *group; /* Parent EC_GROUP object */
73 size_t w; /* Window size */
    /*
     * Constant time access to the X and Y coordinates of the pre-computed
     * generator multiples, in the Montgomery domain. Pre-computed multiples
     * are stored in affine form.
     */
    PRECOMP256_ROW *precomp;
    void *precomp_storage;
    int references;
} EC_PRE_COMP;
84 /* Functions implemented in assembly */
/*
 * Most of the functions below *preserve* the property of inputs being
 * fully reduced, i.e. being in the [0, modulus) range: if the inputs are
 * fully reduced, then so is the output. The reverse is not true: given
 * partially reduced inputs, the output may or may not be fully reduced.
 * "Most" refers to the fact that, given the calculation flow, one can
 * tolerate the addition (the first function below) producing a partially
 * reduced result *if* the multiplications by 2 and 3, which customarily
 * use addition, fully reduce it. This effectively gives two options:
 * a) addition produces a fully reduced result (as long as the inputs are,
 * just like the remaining functions); b) addition is allowed to produce a
 * partially reduced result, but the multiplications by 2 and 3 perform an
 * additional reduction step. The choice between the two can be
 * platform-specific, but it has been a) in all cases so far.
 */
101 /* Modular add: res = a+b mod P */
102 void ecp_nistz256_add(BN_ULONG res[P256_LIMBS],
103 const BN_ULONG a[P256_LIMBS],
104 const BN_ULONG b[P256_LIMBS]);
105 /* Modular mul by 2: res = 2*a mod P */
106 void ecp_nistz256_mul_by_2(BN_ULONG res[P256_LIMBS],
107 const BN_ULONG a[P256_LIMBS]);
108 /* Modular mul by 3: res = 3*a mod P */
109 void ecp_nistz256_mul_by_3(BN_ULONG res[P256_LIMBS],
110 const BN_ULONG a[P256_LIMBS]);
112 /* Modular div by 2: res = a/2 mod P */
113 void ecp_nistz256_div_by_2(BN_ULONG res[P256_LIMBS],
114 const BN_ULONG a[P256_LIMBS]);
115 /* Modular sub: res = a-b mod P */
116 void ecp_nistz256_sub(BN_ULONG res[P256_LIMBS],
117 const BN_ULONG a[P256_LIMBS],
118 const BN_ULONG b[P256_LIMBS]);
119 /* Modular neg: res = -a mod P */
120 void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]);
121 /* Montgomery mul: res = a*b*2^-256 mod P */
122 void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS],
123 const BN_ULONG a[P256_LIMBS],
124 const BN_ULONG b[P256_LIMBS]);
125 /* Montgomery sqr: res = a*a*2^-256 mod P */
126 void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS],
127 const BN_ULONG a[P256_LIMBS]);
128 /* Convert a number from Montgomery domain, by multiplying with 1 */
129 void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS],
130 const BN_ULONG in[P256_LIMBS]);
131 /* Convert a number to Montgomery domain, by multiplying with 2^512 mod P*/
132 void ecp_nistz256_to_mont(BN_ULONG res[P256_LIMBS],
133 const BN_ULONG in[P256_LIMBS]);
134 /* Functions that perform constant time access to the precomputed tables */
135 void ecp_nistz256_select_w5(P256_POINT * val,
136 const P256_POINT * in_t, int index);
137 void ecp_nistz256_select_w7(P256_POINT_AFFINE * val,
138 const P256_POINT_AFFINE * in_t, int index);
140 /* One converted into the Montgomery domain */
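/* i.e. 2^256 mod P, since the Montgomery radix used throughout is 2^256 */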
141 static const BN_ULONG ONE[P256_LIMBS] = {
142 TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000),
143 TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe)
146 static void *ecp_nistz256_pre_comp_dup(void *);
147 static void ecp_nistz256_pre_comp_free(void *);
148 static void ecp_nistz256_pre_comp_clear_free(void *);
149 static EC_PRE_COMP *ecp_nistz256_pre_comp_new(const EC_GROUP *group);
151 /* Precomputed tables for the default generator */
152 #include "ecp_nistz256_table.c"
154 /* Recode window to a signed digit, see ecp_nistputil.c for details */
155 static unsigned int _booth_recode_w5(unsigned int in)
159 s = ~((in >> 5) - 1);
160 d = (1 << 6) - in - 1;
161 d = (d & s) | (in & ~s);
162 d = (d >> 1) + (d & 1);
164 return (d << 1) + (s & 1);
167 static unsigned int _booth_recode_w7(unsigned int in)
171 s = ~((in >> 7) - 1);
172 d = (1 << 8) - in - 1;
173 d = (d & s) | (in & ~s);
174 d = (d >> 1) + (d & 1);
176 return (d << 1) + (s & 1);
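/*
 * Both helpers return the recoded window as (digit << 1) | sign. As a
 * worked example, _booth_recode_w5(0x3e) yields 0x03, i.e. sign = 1 and
 * digit = 1: that window contributes -1 times the table entry, and the
 * compensating carry is picked up implicitly through the one-bit overlap
 * between consecutive windows.
 */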
179 static void copy_conditional(BN_ULONG dst[P256_LIMBS],
180 const BN_ULONG src[P256_LIMBS], BN_ULONG move)
182 BN_ULONG mask1 = -move;
183 BN_ULONG mask2 = ~mask1;
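    /*
     * |move| is expected to be either 0 or 1, so mask1 is either all-zeros
     * or all-ones and the selection below is branch-free (constant-time).
     */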
185 dst[0] = (src[0] & mask1) ^ (dst[0] & mask2);
186 dst[1] = (src[1] & mask1) ^ (dst[1] & mask2);
187 dst[2] = (src[2] & mask1) ^ (dst[2] & mask2);
188 dst[3] = (src[3] & mask1) ^ (dst[3] & mask2);
189 if (P256_LIMBS == 8) {
190 dst[4] = (src[4] & mask1) ^ (dst[4] & mask2);
191 dst[5] = (src[5] & mask1) ^ (dst[5] & mask2);
192 dst[6] = (src[6] & mask1) ^ (dst[6] & mask2);
193 dst[7] = (src[7] & mask1) ^ (dst[7] & mask2);
197 static BN_ULONG is_zero(BN_ULONG in)
206 static BN_ULONG is_equal(const BN_ULONG a[P256_LIMBS],
207 const BN_ULONG b[P256_LIMBS])
215 if (P256_LIMBS == 8) {
225 static BN_ULONG is_one(const BIGNUM *z)
230 if (z->top == (P256_LIMBS - P256_LIMBS / 8)) {
232 res |= a[1] ^ ONE[1];
233 res |= a[2] ^ ONE[2];
234 res |= a[3] ^ ONE[3];
235 if (P256_LIMBS == 8) {
236 res |= a[4] ^ ONE[4];
237 res |= a[5] ^ ONE[5];
238 res |= a[6] ^ ONE[6];
            /*
             * no check for a[7] (being zero) on 32-bit platforms,
             * because value of "one" takes only 7 limbs.
             */
250 static int ecp_nistz256_set_words(BIGNUM *a, BN_ULONG words[P256_LIMBS])
252 if (bn_wexpand(a, P256_LIMBS) == NULL) {
253 ECerr(EC_F_ECP_NISTZ256_SET_WORDS, ERR_R_MALLOC_FAILURE);
256 memcpy(a->d, words, sizeof(BN_ULONG) * P256_LIMBS);
262 #ifndef ECP_NISTZ256_REFERENCE_IMPLEMENTATION
263 void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a);
264 void ecp_nistz256_point_add(P256_POINT *r,
265 const P256_POINT *a, const P256_POINT *b);
266 void ecp_nistz256_point_add_affine(P256_POINT *r,
268 const P256_POINT_AFFINE *b);
270 /* Point double: r = 2*a */
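/*
 * The sequence of field operations below implements the standard Jacobian
 * doubling formulas, specialised for a = -3:
 *   S  = 4*X1*Y1^2
 *   M  = 3*(X1 + Z1^2)*(X1 - Z1^2)
 *   X3 = M^2 - 2*S
 *   Y3 = M*(S - X3) - 8*Y1^4
 *   Z3 = 2*Y1*Z1
 * All inputs and outputs are in the Montgomery domain.
 */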
271 static void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a)
273 BN_ULONG S[P256_LIMBS];
274 BN_ULONG M[P256_LIMBS];
275 BN_ULONG Zsqr[P256_LIMBS];
276 BN_ULONG tmp0[P256_LIMBS];
278 const BN_ULONG *in_x = a->X;
279 const BN_ULONG *in_y = a->Y;
280 const BN_ULONG *in_z = a->Z;
282 BN_ULONG *res_x = r->X;
283 BN_ULONG *res_y = r->Y;
284 BN_ULONG *res_z = r->Z;
286 ecp_nistz256_mul_by_2(S, in_y);
288 ecp_nistz256_sqr_mont(Zsqr, in_z);
290 ecp_nistz256_sqr_mont(S, S);
292 ecp_nistz256_mul_mont(res_z, in_z, in_y);
293 ecp_nistz256_mul_by_2(res_z, res_z);
295 ecp_nistz256_add(M, in_x, Zsqr);
296 ecp_nistz256_sub(Zsqr, in_x, Zsqr);
298 ecp_nistz256_sqr_mont(res_y, S);
299 ecp_nistz256_div_by_2(res_y, res_y);
301 ecp_nistz256_mul_mont(M, M, Zsqr);
302 ecp_nistz256_mul_by_3(M, M);
304 ecp_nistz256_mul_mont(S, S, in_x);
305 ecp_nistz256_mul_by_2(tmp0, S);
307 ecp_nistz256_sqr_mont(res_x, M);
309 ecp_nistz256_sub(res_x, res_x, tmp0);
310 ecp_nistz256_sub(S, S, res_x);
312 ecp_nistz256_mul_mont(S, S, M);
313 ecp_nistz256_sub(res_y, S, res_y);
316 /* Point addition: r = a+b */
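/*
 * The sequence of field operations below implements the standard Jacobian
 * addition formulas:
 *   U1 = X1*Z2^2, U2 = X2*Z1^2, S1 = Y1*Z2^3, S2 = Y2*Z1^3
 *   H  = U2 - U1, R = S2 - S1
 *   X3 = R^2 - H^3 - 2*U1*H^2
 *   Y3 = R*(U1*H^2 - X3) - S1*H^3
 *   Z3 = H*Z1*Z2
 */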
317 static void ecp_nistz256_point_add(P256_POINT *r,
318 const P256_POINT *a, const P256_POINT *b)
320 BN_ULONG U2[P256_LIMBS], S2[P256_LIMBS];
321 BN_ULONG U1[P256_LIMBS], S1[P256_LIMBS];
322 BN_ULONG Z1sqr[P256_LIMBS];
323 BN_ULONG Z2sqr[P256_LIMBS];
324 BN_ULONG H[P256_LIMBS], R[P256_LIMBS];
325 BN_ULONG Hsqr[P256_LIMBS];
326 BN_ULONG Rsqr[P256_LIMBS];
327 BN_ULONG Hcub[P256_LIMBS];
329 BN_ULONG res_x[P256_LIMBS];
330 BN_ULONG res_y[P256_LIMBS];
331 BN_ULONG res_z[P256_LIMBS];
333 BN_ULONG in1infty, in2infty;
335 const BN_ULONG *in1_x = a->X;
336 const BN_ULONG *in1_y = a->Y;
337 const BN_ULONG *in1_z = a->Z;
339 const BN_ULONG *in2_x = b->X;
340 const BN_ULONG *in2_y = b->Y;
341 const BN_ULONG *in2_z = b->Z;
    /*
     * We encode infinity as (0,0), which is not on the curve,
     * so it is OK to test only X and Y for zero.
     */
345 in1infty = (in1_x[0] | in1_x[1] | in1_x[2] | in1_x[3] |
346 in1_y[0] | in1_y[1] | in1_y[2] | in1_y[3]);
348 in1infty |= (in1_x[4] | in1_x[5] | in1_x[6] | in1_x[7] |
349 in1_y[4] | in1_y[5] | in1_y[6] | in1_y[7]);
351 in2infty = (in2_x[0] | in2_x[1] | in2_x[2] | in2_x[3] |
352 in2_y[0] | in2_y[1] | in2_y[2] | in2_y[3]);
354 in2infty |= (in2_x[4] | in2_x[5] | in2_x[6] | in2_x[7] |
355 in2_y[4] | in2_y[5] | in2_y[6] | in2_y[7]);
357 in1infty = is_zero(in1infty);
358 in2infty = is_zero(in2infty);
360 ecp_nistz256_sqr_mont(Z2sqr, in2_z); /* Z2^2 */
361 ecp_nistz256_sqr_mont(Z1sqr, in1_z); /* Z1^2 */
363 ecp_nistz256_mul_mont(S1, Z2sqr, in2_z); /* S1 = Z2^3 */
364 ecp_nistz256_mul_mont(S2, Z1sqr, in1_z); /* S2 = Z1^3 */
366 ecp_nistz256_mul_mont(S1, S1, in1_y); /* S1 = Y1*Z2^3 */
367 ecp_nistz256_mul_mont(S2, S2, in2_y); /* S2 = Y2*Z1^3 */
368 ecp_nistz256_sub(R, S2, S1); /* R = S2 - S1 */
370 ecp_nistz256_mul_mont(U1, in1_x, Z2sqr); /* U1 = X1*Z2^2 */
371 ecp_nistz256_mul_mont(U2, in2_x, Z1sqr); /* U2 = X2*Z1^2 */
372 ecp_nistz256_sub(H, U2, U1); /* H = U2 - U1 */
    /*
     * This should not happen during sign/ecdh, so no constant time violation
     */
377 if (is_equal(U1, U2) && !in1infty && !in2infty) {
378 if (is_equal(S1, S2)) {
379 ecp_nistz256_point_double(r, a);
382 memset(r, 0, sizeof(*r));
387 ecp_nistz256_sqr_mont(Rsqr, R); /* R^2 */
388 ecp_nistz256_mul_mont(res_z, H, in1_z); /* Z3 = H*Z1*Z2 */
389 ecp_nistz256_sqr_mont(Hsqr, H); /* H^2 */
390 ecp_nistz256_mul_mont(res_z, res_z, in2_z); /* Z3 = H*Z1*Z2 */
391 ecp_nistz256_mul_mont(Hcub, Hsqr, H); /* H^3 */
393 ecp_nistz256_mul_mont(U2, U1, Hsqr); /* U1*H^2 */
394 ecp_nistz256_mul_by_2(Hsqr, U2); /* 2*U1*H^2 */
396 ecp_nistz256_sub(res_x, Rsqr, Hsqr);
397 ecp_nistz256_sub(res_x, res_x, Hcub);
399 ecp_nistz256_sub(res_y, U2, res_x);
401 ecp_nistz256_mul_mont(S2, S1, Hcub);
402 ecp_nistz256_mul_mont(res_y, R, res_y);
403 ecp_nistz256_sub(res_y, res_y, S2);
405 copy_conditional(res_x, in2_x, in1infty);
406 copy_conditional(res_y, in2_y, in1infty);
407 copy_conditional(res_z, in2_z, in1infty);
409 copy_conditional(res_x, in1_x, in2infty);
410 copy_conditional(res_y, in1_y, in2infty);
411 copy_conditional(res_z, in1_z, in2infty);
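    /* If either input was infinity, the result is a copy of the other input. */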
413 memcpy(r->X, res_x, sizeof(res_x));
414 memcpy(r->Y, res_y, sizeof(res_y));
415 memcpy(r->Z, res_z, sizeof(res_z));
418 /* Point addition when b is known to be affine: r = a+b */
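/*
 * Same formulas as in ecp_nistz256_point_add, simplified for Z2 = 1:
 * U1 = X1, S1 = Y1 and Z3 = H*Z1.
 */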
419 static void ecp_nistz256_point_add_affine(P256_POINT *r,
421 const P256_POINT_AFFINE *b)
423 BN_ULONG U2[P256_LIMBS], S2[P256_LIMBS];
424 BN_ULONG Z1sqr[P256_LIMBS];
425 BN_ULONG H[P256_LIMBS], R[P256_LIMBS];
426 BN_ULONG Hsqr[P256_LIMBS];
427 BN_ULONG Rsqr[P256_LIMBS];
428 BN_ULONG Hcub[P256_LIMBS];
430 BN_ULONG res_x[P256_LIMBS];
431 BN_ULONG res_y[P256_LIMBS];
432 BN_ULONG res_z[P256_LIMBS];
434 BN_ULONG in1infty, in2infty;
436 const BN_ULONG *in1_x = a->X;
437 const BN_ULONG *in1_y = a->Y;
438 const BN_ULONG *in1_z = a->Z;
440 const BN_ULONG *in2_x = b->X;
441 const BN_ULONG *in2_y = b->Y;
    /*
     * In affine representation we encode infinity as (0,0), which is not on
     * the curve itself.
     */
447 in1infty = (in1_x[0] | in1_x[1] | in1_x[2] | in1_x[3] |
448 in1_y[0] | in1_y[1] | in1_y[2] | in1_y[3]);
450 in1infty |= (in1_x[4] | in1_x[5] | in1_x[6] | in1_x[7] |
451 in1_y[4] | in1_y[5] | in1_y[6] | in1_y[7]);
453 in2infty = (in2_x[0] | in2_x[1] | in2_x[2] | in2_x[3] |
454 in2_y[0] | in2_y[1] | in2_y[2] | in2_y[3]);
456 in2infty |= (in2_x[4] | in2_x[5] | in2_x[6] | in2_x[7] |
457 in2_y[4] | in2_y[5] | in2_y[6] | in2_y[7]);
459 in1infty = is_zero(in1infty);
460 in2infty = is_zero(in2infty);
462 ecp_nistz256_sqr_mont(Z1sqr, in1_z); /* Z1^2 */
464 ecp_nistz256_mul_mont(U2, in2_x, Z1sqr); /* U2 = X2*Z1^2 */
465 ecp_nistz256_sub(H, U2, in1_x); /* H = U2 - U1 */
467 ecp_nistz256_mul_mont(S2, Z1sqr, in1_z); /* S2 = Z1^3 */
469 ecp_nistz256_mul_mont(res_z, H, in1_z); /* Z3 = H*Z1*Z2 */
471 ecp_nistz256_mul_mont(S2, S2, in2_y); /* S2 = Y2*Z1^3 */
472 ecp_nistz256_sub(R, S2, in1_y); /* R = S2 - S1 */
474 ecp_nistz256_sqr_mont(Hsqr, H); /* H^2 */
475 ecp_nistz256_sqr_mont(Rsqr, R); /* R^2 */
476 ecp_nistz256_mul_mont(Hcub, Hsqr, H); /* H^3 */
478 ecp_nistz256_mul_mont(U2, in1_x, Hsqr); /* U1*H^2 */
479 ecp_nistz256_mul_by_2(Hsqr, U2); /* 2*U1*H^2 */
481 ecp_nistz256_sub(res_x, Rsqr, Hsqr);
482 ecp_nistz256_sub(res_x, res_x, Hcub);
483 ecp_nistz256_sub(H, U2, res_x);
485 ecp_nistz256_mul_mont(S2, in1_y, Hcub);
486 ecp_nistz256_mul_mont(H, H, R);
487 ecp_nistz256_sub(res_y, H, S2);
489 copy_conditional(res_x, in2_x, in1infty);
490 copy_conditional(res_x, in1_x, in2infty);
492 copy_conditional(res_y, in2_y, in1infty);
493 copy_conditional(res_y, in1_y, in2infty);
495 copy_conditional(res_z, ONE, in1infty);
496 copy_conditional(res_z, in1_z, in2infty);
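    /*
     * If |a| was infinity the result is |b| (with Z = 1 in Montgomery form);
     * if |b| was infinity the result is |a|.
     */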
498 memcpy(r->X, res_x, sizeof(res_x));
499 memcpy(r->Y, res_y, sizeof(res_y));
500 memcpy(r->Z, res_z, sizeof(res_z));
504 /* r = in^-1 mod p */
505 static void ecp_nistz256_mod_inverse(BN_ULONG r[P256_LIMBS],
506 const BN_ULONG in[P256_LIMBS])
    /*
     * The modulus is ffffffff 00000001 00000000 00000000 00000000 ffffffff
     * ffffffff ffffffff. We use Fermat's Little Theorem (FLT) and raise |in|
     * to the power modulus - 2.
     */
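    /*
     * The exponent p - 2 is
     *   ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff fffffffd
     * The chain below first builds in^(2^k - 1) for k = 2, 4, 8, 16, 32
     * (p2, p4, p8, p16, p32) and then assembles the full exponent from those
     * blocks with further squarings and multiplications.
     */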
512 BN_ULONG p2[P256_LIMBS];
513 BN_ULONG p4[P256_LIMBS];
514 BN_ULONG p8[P256_LIMBS];
515 BN_ULONG p16[P256_LIMBS];
516 BN_ULONG p32[P256_LIMBS];
517 BN_ULONG res[P256_LIMBS];
520 ecp_nistz256_sqr_mont(res, in);
521 ecp_nistz256_mul_mont(p2, res, in); /* 3*p */
523 ecp_nistz256_sqr_mont(res, p2);
524 ecp_nistz256_sqr_mont(res, res);
525 ecp_nistz256_mul_mont(p4, res, p2); /* f*p */
527 ecp_nistz256_sqr_mont(res, p4);
528 ecp_nistz256_sqr_mont(res, res);
529 ecp_nistz256_sqr_mont(res, res);
530 ecp_nistz256_sqr_mont(res, res);
531 ecp_nistz256_mul_mont(p8, res, p4); /* ff*p */
533 ecp_nistz256_sqr_mont(res, p8);
534 for (i = 0; i < 7; i++)
535 ecp_nistz256_sqr_mont(res, res);
536 ecp_nistz256_mul_mont(p16, res, p8); /* ffff*p */
538 ecp_nistz256_sqr_mont(res, p16);
539 for (i = 0; i < 15; i++)
540 ecp_nistz256_sqr_mont(res, res);
541 ecp_nistz256_mul_mont(p32, res, p16); /* ffffffff*p */
543 ecp_nistz256_sqr_mont(res, p32);
544 for (i = 0; i < 31; i++)
545 ecp_nistz256_sqr_mont(res, res);
546 ecp_nistz256_mul_mont(res, res, in);
548 for (i = 0; i < 32 * 4; i++)
549 ecp_nistz256_sqr_mont(res, res);
550 ecp_nistz256_mul_mont(res, res, p32);
552 for (i = 0; i < 32; i++)
553 ecp_nistz256_sqr_mont(res, res);
554 ecp_nistz256_mul_mont(res, res, p32);
556 for (i = 0; i < 16; i++)
557 ecp_nistz256_sqr_mont(res, res);
558 ecp_nistz256_mul_mont(res, res, p16);
560 for (i = 0; i < 8; i++)
561 ecp_nistz256_sqr_mont(res, res);
562 ecp_nistz256_mul_mont(res, res, p8);
564 ecp_nistz256_sqr_mont(res, res);
565 ecp_nistz256_sqr_mont(res, res);
566 ecp_nistz256_sqr_mont(res, res);
567 ecp_nistz256_sqr_mont(res, res);
568 ecp_nistz256_mul_mont(res, res, p4);
570 ecp_nistz256_sqr_mont(res, res);
571 ecp_nistz256_sqr_mont(res, res);
572 ecp_nistz256_mul_mont(res, res, p2);
574 ecp_nistz256_sqr_mont(res, res);
575 ecp_nistz256_sqr_mont(res, res);
576 ecp_nistz256_mul_mont(res, res, in);
578 memcpy(r, res, sizeof(res));
/*
 * ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and
 * returns one if it fits. Otherwise it returns zero.
 */
585 static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS],
588 if (in->top > P256_LIMBS)
591 memset(out, 0, sizeof(BN_ULONG) * P256_LIMBS);
592 memcpy(out, in->d, sizeof(BN_ULONG) * in->top);
596 /* r = sum(scalar[i]*point[i]) */
597 static int ecp_nistz256_windowed_mul(const EC_GROUP *group,
599 const BIGNUM **scalar,
600 const EC_POINT **point,
601 int num, BN_CTX *ctx)
606 unsigned char (*p_str)[33] = NULL;
607 const unsigned int window_size = 5;
608 const unsigned int mask = (1 << (window_size + 1)) - 1;
610 BN_ULONG tmp[P256_LIMBS];
611 ALIGN32 P256_POINT h;
612 const BIGNUM **scalars = NULL;
613 P256_POINT (*table)[16] = NULL;
614 void *table_storage = NULL;
617 OPENSSL_malloc(num * 16 * sizeof(P256_POINT) + 64)) == NULL
619 OPENSSL_malloc(num * 33 * sizeof(unsigned char))) == NULL
620 || (scalars = OPENSSL_malloc(num * sizeof(BIGNUM *))) == NULL) {
621 ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, ERR_R_MALLOC_FAILURE);
624 table = (void *)ALIGNPTR(table_storage, 64);
627 for (i = 0; i < num; i++) {
628 P256_POINT *row = table[i];
630 /* This is an unusual input, we don't guarantee constant-timeness. */
631 if ((BN_num_bits(scalar[i]) > 256) || BN_is_negative(scalar[i])) {
634 if ((mod = BN_CTX_get(ctx)) == NULL)
636 if (!BN_nnmod(mod, scalar[i], &group->order, ctx)) {
637 ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, ERR_R_BN_LIB);
642 scalars[i] = scalar[i];
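        /*
         * Serialize the (at most 256-bit) scalar into 33 little-endian
         * bytes; the extra 33rd byte keeps the two-byte window reads in the
         * main loop below within bounds.
         */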
644 for (j = 0; j < scalars[i]->top * BN_BYTES; j += BN_BYTES) {
645 BN_ULONG d = scalars[i]->d[j / BN_BYTES];
647 p_str[i][j + 0] = d & 0xff;
648 p_str[i][j + 1] = (d >> 8) & 0xff;
649 p_str[i][j + 2] = (d >> 16) & 0xff;
650 p_str[i][j + 3] = (d >>= 24) & 0xff;
653 p_str[i][j + 4] = d & 0xff;
654 p_str[i][j + 5] = (d >> 8) & 0xff;
655 p_str[i][j + 6] = (d >> 16) & 0xff;
656 p_str[i][j + 7] = (d >> 24) & 0xff;
        /*
         * table[0] is implicitly (0,0,0) (the point at infinity), therefore
         * it is not stored. All other values are actually stored with an
         * offset of -1 in table.
         */
667 if (!ecp_nistz256_bignum_to_field_elem(row[1 - 1].X, &point[i]->X)
668 || !ecp_nistz256_bignum_to_field_elem(row[1 - 1].Y, &point[i]->Y)
669 || !ecp_nistz256_bignum_to_field_elem(row[1 - 1].Z, &point[i]->Z)) {
670 ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, EC_R_COORDINATES_OUT_OF_RANGE);
674 ecp_nistz256_point_double(&row[ 2 - 1], &row[ 1 - 1]);
675 ecp_nistz256_point_add (&row[ 3 - 1], &row[ 2 - 1], &row[1 - 1]);
676 ecp_nistz256_point_double(&row[ 4 - 1], &row[ 2 - 1]);
677 ecp_nistz256_point_double(&row[ 6 - 1], &row[ 3 - 1]);
678 ecp_nistz256_point_double(&row[ 8 - 1], &row[ 4 - 1]);
679 ecp_nistz256_point_double(&row[12 - 1], &row[ 6 - 1]);
680 ecp_nistz256_point_add (&row[ 5 - 1], &row[ 4 - 1], &row[1 - 1]);
681 ecp_nistz256_point_add (&row[ 7 - 1], &row[ 6 - 1], &row[1 - 1]);
682 ecp_nistz256_point_add (&row[ 9 - 1], &row[ 8 - 1], &row[1 - 1]);
683 ecp_nistz256_point_add (&row[13 - 1], &row[12 - 1], &row[1 - 1]);
684 ecp_nistz256_point_double(&row[14 - 1], &row[ 7 - 1]);
685 ecp_nistz256_point_double(&row[10 - 1], &row[ 5 - 1]);
686 ecp_nistz256_point_add (&row[15 - 1], &row[14 - 1], &row[1 - 1]);
687 ecp_nistz256_point_add (&row[11 - 1], &row[10 - 1], &row[1 - 1]);
688 ecp_nistz256_point_add (&row[16 - 1], &row[15 - 1], &row[1 - 1]);
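        /* At this point row[k - 1] holds k*point[i] for k = 1..16. */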
693 wvalue = p_str[0][(index - 1) / 8];
694 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
696 ecp_nistz256_select_w5(r, table[0], _booth_recode_w5(wvalue) >> 1);
699 for (i = (index == 255 ? 1 : 0); i < num; i++) {
700 unsigned int off = (index - 1) / 8;
702 wvalue = p_str[i][off] | p_str[i][off + 1] << 8;
703 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
705 wvalue = _booth_recode_w5(wvalue);
707 ecp_nistz256_select_w5(&h, table[i], wvalue >> 1);
709 ecp_nistz256_neg(tmp, h.Y);
710 copy_conditional(h.Y, tmp, (wvalue & 1));
712 ecp_nistz256_point_add(r, r, &h);
715 index -= window_size;
717 ecp_nistz256_point_double(r, r);
718 ecp_nistz256_point_double(r, r);
719 ecp_nistz256_point_double(r, r);
720 ecp_nistz256_point_double(r, r);
721 ecp_nistz256_point_double(r, r);
725 for (i = 0; i < num; i++) {
726 wvalue = p_str[i][0];
727 wvalue = (wvalue << 1) & mask;
729 wvalue = _booth_recode_w5(wvalue);
731 ecp_nistz256_select_w5(&h, table[i], wvalue >> 1);
733 ecp_nistz256_neg(tmp, h.Y);
734 copy_conditional(h.Y, tmp, wvalue & 1);
736 ecp_nistz256_point_add(r, r, &h);
742 OPENSSL_free(table_storage);
746 OPENSSL_free(scalars);
750 /* Coordinates of G, for which we have precomputed tables */
static const BN_ULONG def_xG[P256_LIMBS] = {
752 TOBN(0x79e730d4, 0x18a9143c), TOBN(0x75ba95fc, 0x5fedb601),
753 TOBN(0x79fb732b, 0x77622510), TOBN(0x18905f76, 0xa53755c6)
static const BN_ULONG def_yG[P256_LIMBS] = {
757 TOBN(0xddf25357, 0xce95560a), TOBN(0x8b4ab8e4, 0xba19e45c),
758 TOBN(0xd2e88688, 0xdd21f325), TOBN(0x8571ff18, 0x25885d85)
/*
 * ecp_nistz256_is_affine_G returns one if |generator| is the standard, P-256
 * generator.
 */
767 return (generator->X.top == P256_LIMBS) &&
768 (generator->Y.top == P256_LIMBS) &&
769 is_equal(generator->X.d, def_xG) &&
770 is_equal(generator->Y.d, def_yG) && is_one(&generator->Z);
773 static int ecp_nistz256_mult_precompute(EC_GROUP *group, BN_CTX *ctx)
    /*
     * We precompute a table for a Booth encoded exponent (wNAF) based
     * computation. Each table holds 64 values for safe access, with an
     * implicit value of infinity at index zero. We use a window of size 7,
     * and therefore require ceil(256/7) = 37 tables.
     */
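    /*
     * The resulting layout is preComputedTable[j][k] = (k + 1) * 2^(7*j) * G
     * for j = 0..36 and k = 0..63, with coordinates stored in affine,
     * Montgomery form.
     */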
782 EC_POINT *P = NULL, *T = NULL;
783 const EC_POINT *generator;
784 EC_PRE_COMP *pre_comp;
785 BN_CTX *new_ctx = NULL;
786 int i, j, k, ret = 0;
789 PRECOMP256_ROW *preComputedTable = NULL;
790 unsigned char *precomp_storage = NULL;
792 /* if there is an old EC_PRE_COMP object, throw it away */
793 EC_EX_DATA_free_data(&group->extra_data, ecp_nistz256_pre_comp_dup,
794 ecp_nistz256_pre_comp_free,
795 ecp_nistz256_pre_comp_clear_free);
797 generator = EC_GROUP_get0_generator(group);
798 if (generator == NULL) {
799 ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, EC_R_UNDEFINED_GENERATOR);
803 if (ecp_nistz256_is_affine_G(generator)) {
        /*
         * No need to calculate tables for the standard generator because we
         * have them statically.
         */
811 if ((pre_comp = ecp_nistz256_pre_comp_new(group)) == NULL)
815 ctx = new_ctx = BN_CTX_new();
821 order = BN_CTX_get(ctx);
826 if (!EC_GROUP_get_order(group, order, ctx))
829 if (BN_is_zero(order)) {
830 ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, EC_R_UNKNOWN_ORDER);
836 if ((precomp_storage =
837 OPENSSL_malloc(37 * 64 * sizeof(P256_POINT_AFFINE) + 64)) == NULL) {
838 ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, ERR_R_MALLOC_FAILURE);
841 preComputedTable = (void *)ALIGNPTR(precomp_storage, 64);
844 P = EC_POINT_new(group);
845 T = EC_POINT_new(group);
846 if (P == NULL || T == NULL)
    /*
     * The zero entry is implicitly infinity, and we skip it, storing other
     * values with -1 offset.
     */
853 if (!EC_POINT_copy(T, generator))
856 for (k = 0; k < 64; k++) {
857 if (!EC_POINT_copy(P, T))
859 for (j = 0; j < 37; j++) {
            /*
             * It would be faster to use EC_POINTs_make_affine and
             * make multiple points affine at the same time.
             */
864 if (!EC_POINT_make_affine(group, P, ctx))
866 if (!ecp_nistz256_bignum_to_field_elem(preComputedTable[j][k].X,
868 !ecp_nistz256_bignum_to_field_elem(preComputedTable[j][k].Y,
870 ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE,
871 EC_R_COORDINATES_OUT_OF_RANGE);
874 for (i = 0; i < 7; i++) {
875 if (!EC_POINT_dbl(group, P, P, ctx))
879 if (!EC_POINT_add(group, T, T, generator, ctx))
883 pre_comp->group = group;
885 pre_comp->precomp = preComputedTable;
886 pre_comp->precomp_storage = precomp_storage;
888 precomp_storage = NULL;
890 if (!EC_EX_DATA_set_data(&group->extra_data, pre_comp,
891 ecp_nistz256_pre_comp_dup,
892 ecp_nistz256_pre_comp_free,
893 ecp_nistz256_pre_comp_clear_free)) {
904 BN_CTX_free(new_ctx);
907 ecp_nistz256_pre_comp_free(pre_comp);
909 OPENSSL_free(precomp_storage);
/*
 * Note that by default ECP_NISTZ256_AVX2 is undefined. While the AVX2 code
 * processes 4 points in parallel, the corresponding serial operation is
 * several times slower, because it uses 29x29=58-bit multiplication as
 * opposed to 64x64=128-bit in the integer-only scalar case. As a result it
 * doesn't provide a *significant* performance improvement. Note that just
 * defining ECP_NISTZ256_AVX2 is not sufficient to make it work; you also
 * need to compile the asm/ecp_nistz256-avx2.pl module.
 */
926 #if defined(ECP_NISTZ256_AVX2)
# if !(defined(__x86_64) || defined(__x86_64__) || \
       defined(_M_AMD64) || defined(_M_X64)) || \
929 !(defined(__GNUC__) || defined(_MSC_VER)) /* this is for ALIGN32 */
930 # undef ECP_NISTZ256_AVX2
932 /* Constant time access, loading four values, from four consecutive tables */
933 void ecp_nistz256_avx2_select_w7(P256_POINT_AFFINE * val,
934 const P256_POINT_AFFINE * in_t, int index);
935 void ecp_nistz256_avx2_multi_select_w7(void *result, const void *in, int index0,
936 int index1, int index2, int index3);
937 void ecp_nistz256_avx2_transpose_convert(void *RESULTx4, const void *in);
938 void ecp_nistz256_avx2_convert_transpose_back(void *result, const void *Ax4);
939 void ecp_nistz256_avx2_point_add_affine_x4(void *RESULTx4, const void *Ax4,
941 void ecp_nistz256_avx2_point_add_affines_x4(void *RESULTx4, const void *Ax4,
943 void ecp_nistz256_avx2_to_mont(void *RESULTx4, const void *Ax4);
944 void ecp_nistz256_avx2_from_mont(void *RESULTx4, const void *Ax4);
945 void ecp_nistz256_avx2_set1(void *RESULTx4);
946 int ecp_nistz_avx2_eligible(void);
948 static void booth_recode_w7(unsigned char *sign,
949 unsigned char *digit, unsigned char in)
953 s = ~((in >> 7) - 1);
954 d = (1 << 8) - in - 1;
955 d = (d & s) | (in & ~s);
956 d = (d >> 1) + (d & 1);
/*
 * ecp_nistz256_avx2_mul_g performs multiplication by G, using only the
 * precomputed table. It does 4 affine point additions in parallel,
 * significantly speeding up point multiplication for a fixed value.
 */
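/*
 * The 37 windows are consumed as 9 rounds of 4 windows each (tables 0..35),
 * one window per lane, followed by the final window (table 36), which is
 * handled serially; the four lane accumulators are then added together.
 */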
967 static void ecp_nistz256_avx2_mul_g(P256_POINT *r,
968 unsigned char p_str[33],
969 const P256_POINT_AFFINE(*preComputedTable)[64])
971 const unsigned int window_size = 7;
972 const unsigned int mask = (1 << (window_size + 1)) - 1;
974 /* Using 4 windows at a time */
975 unsigned char sign0, digit0;
976 unsigned char sign1, digit1;
977 unsigned char sign2, digit2;
978 unsigned char sign3, digit3;
979 unsigned int index = 0;
980 BN_ULONG tmp[P256_LIMBS];
983 ALIGN32 BN_ULONG aX4[4 * 9 * 3] = { 0 };
984 ALIGN32 BN_ULONG bX4[4 * 9 * 2] = { 0 };
985 ALIGN32 P256_POINT_AFFINE point_arr[P256_LIMBS];
986 ALIGN32 P256_POINT res_point_arr[P256_LIMBS];
988 /* Initial four windows */
989 wvalue = *((u16 *) & p_str[0]);
990 wvalue = (wvalue << 1) & mask;
991 index += window_size;
992 booth_recode_w7(&sign0, &digit0, wvalue);
993 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
994 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
995 index += window_size;
996 booth_recode_w7(&sign1, &digit1, wvalue);
997 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
998 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
999 index += window_size;
1000 booth_recode_w7(&sign2, &digit2, wvalue);
1001 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1002 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1003 index += window_size;
1004 booth_recode_w7(&sign3, &digit3, wvalue);
1006 ecp_nistz256_avx2_multi_select_w7(point_arr, preComputedTable[0],
1007 digit0, digit1, digit2, digit3);
1009 ecp_nistz256_neg(tmp, point_arr[0].Y);
1010 copy_conditional(point_arr[0].Y, tmp, sign0);
1011 ecp_nistz256_neg(tmp, point_arr[1].Y);
1012 copy_conditional(point_arr[1].Y, tmp, sign1);
1013 ecp_nistz256_neg(tmp, point_arr[2].Y);
1014 copy_conditional(point_arr[2].Y, tmp, sign2);
1015 ecp_nistz256_neg(tmp, point_arr[3].Y);
1016 copy_conditional(point_arr[3].Y, tmp, sign3);
1018 ecp_nistz256_avx2_transpose_convert(aX4, point_arr);
1019 ecp_nistz256_avx2_to_mont(aX4, aX4);
1020 ecp_nistz256_avx2_to_mont(&aX4[4 * 9], &aX4[4 * 9]);
1021 ecp_nistz256_avx2_set1(&aX4[4 * 9 * 2]);
1023 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1024 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1025 index += window_size;
1026 booth_recode_w7(&sign0, &digit0, wvalue);
1027 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1028 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1029 index += window_size;
1030 booth_recode_w7(&sign1, &digit1, wvalue);
1031 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1032 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1033 index += window_size;
1034 booth_recode_w7(&sign2, &digit2, wvalue);
1035 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1036 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1037 index += window_size;
1038 booth_recode_w7(&sign3, &digit3, wvalue);
1040 ecp_nistz256_avx2_multi_select_w7(point_arr, preComputedTable[4 * 1],
1041 digit0, digit1, digit2, digit3);
1043 ecp_nistz256_neg(tmp, point_arr[0].Y);
1044 copy_conditional(point_arr[0].Y, tmp, sign0);
1045 ecp_nistz256_neg(tmp, point_arr[1].Y);
1046 copy_conditional(point_arr[1].Y, tmp, sign1);
1047 ecp_nistz256_neg(tmp, point_arr[2].Y);
1048 copy_conditional(point_arr[2].Y, tmp, sign2);
1049 ecp_nistz256_neg(tmp, point_arr[3].Y);
1050 copy_conditional(point_arr[3].Y, tmp, sign3);
1052 ecp_nistz256_avx2_transpose_convert(bX4, point_arr);
1053 ecp_nistz256_avx2_to_mont(bX4, bX4);
1054 ecp_nistz256_avx2_to_mont(&bX4[4 * 9], &bX4[4 * 9]);
1055 /* Optimized when both inputs are affine */
1056 ecp_nistz256_avx2_point_add_affines_x4(aX4, aX4, bX4);
1058 for (i = 2; i < 9; i++) {
1059 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1060 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1061 index += window_size;
1062 booth_recode_w7(&sign0, &digit0, wvalue);
1063 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1064 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1065 index += window_size;
1066 booth_recode_w7(&sign1, &digit1, wvalue);
1067 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1068 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1069 index += window_size;
1070 booth_recode_w7(&sign2, &digit2, wvalue);
1071 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1072 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1073 index += window_size;
1074 booth_recode_w7(&sign3, &digit3, wvalue);
1076 ecp_nistz256_avx2_multi_select_w7(point_arr,
1077 preComputedTable[4 * i],
1078 digit0, digit1, digit2, digit3);
1080 ecp_nistz256_neg(tmp, point_arr[0].Y);
1081 copy_conditional(point_arr[0].Y, tmp, sign0);
1082 ecp_nistz256_neg(tmp, point_arr[1].Y);
1083 copy_conditional(point_arr[1].Y, tmp, sign1);
1084 ecp_nistz256_neg(tmp, point_arr[2].Y);
1085 copy_conditional(point_arr[2].Y, tmp, sign2);
1086 ecp_nistz256_neg(tmp, point_arr[3].Y);
1087 copy_conditional(point_arr[3].Y, tmp, sign3);
1089 ecp_nistz256_avx2_transpose_convert(bX4, point_arr);
1090 ecp_nistz256_avx2_to_mont(bX4, bX4);
1091 ecp_nistz256_avx2_to_mont(&bX4[4 * 9], &bX4[4 * 9]);
1093 ecp_nistz256_avx2_point_add_affine_x4(aX4, aX4, bX4);
1096 ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 0], &aX4[4 * 9 * 0]);
1097 ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 1], &aX4[4 * 9 * 1]);
1098 ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 2], &aX4[4 * 9 * 2]);
1100 ecp_nistz256_avx2_convert_transpose_back(res_point_arr, aX4);
1101 /* Last window is performed serially */
1102 wvalue = *((u16 *) & p_str[(index - 1) / 8]);
1103 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1104 booth_recode_w7(&sign0, &digit0, wvalue);
1105 ecp_nistz256_avx2_select_w7((P256_POINT_AFFINE *) r,
1106 preComputedTable[36], digit0);
1107 ecp_nistz256_neg(tmp, r->Y);
1108 copy_conditional(r->Y, tmp, sign0);
1109 memcpy(r->Z, ONE, sizeof(ONE));
1110 /* Sum the four windows */
1111 ecp_nistz256_point_add(r, r, &res_point_arr[0]);
1112 ecp_nistz256_point_add(r, r, &res_point_arr[1]);
1113 ecp_nistz256_point_add(r, r, &res_point_arr[2]);
1114 ecp_nistz256_point_add(r, r, &res_point_arr[3]);
1119 static int ecp_nistz256_set_from_affine(EC_POINT *out, const EC_GROUP *group,
1120 const P256_POINT_AFFINE *in,
1124 BN_ULONG d_x[P256_LIMBS], d_y[P256_LIMBS];
1127 memcpy(d_x, in->X, sizeof(d_x));
1129 x.dmax = x.top = P256_LIMBS;
1131 x.flags = BN_FLG_STATIC_DATA;
1133 memcpy(d_y, in->Y, sizeof(d_y));
1135 y.dmax = y.top = P256_LIMBS;
1137 y.flags = BN_FLG_STATIC_DATA;
1139 ret = EC_POINT_set_affine_coordinates_GFp(group, out, &x, &y, ctx);
1144 /* r = scalar*G + sum(scalars[i]*points[i]) */
1145 static int ecp_nistz256_points_mul(const EC_GROUP *group,
1147 const BIGNUM *scalar,
1149 const EC_POINT *points[],
1150 const BIGNUM *scalars[], BN_CTX *ctx)
1152 int i = 0, ret = 0, no_precomp_for_generator = 0, p_is_infinity = 0;
1154 unsigned char p_str[33] = { 0 };
1155 const PRECOMP256_ROW *preComputedTable = NULL;
1156 const EC_PRE_COMP *pre_comp = NULL;
1157 const EC_POINT *generator = NULL;
1158 unsigned int index = 0;
1159 BN_CTX *new_ctx = NULL;
1160 const BIGNUM **new_scalars = NULL;
1161 const EC_POINT **new_points = NULL;
1162 const unsigned int window_size = 7;
1163 const unsigned int mask = (1 << (window_size + 1)) - 1;
1164 unsigned int wvalue;
1167 P256_POINT_AFFINE a;
1171 if (group->meth != r->meth) {
1172 ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_INCOMPATIBLE_OBJECTS);
1176 if ((scalar == NULL) && (num == 0))
1177 return EC_POINT_set_to_infinity(group, r);
1179 for (j = 0; j < num; j++) {
1180 if (group->meth != points[j]->meth) {
1181 ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_INCOMPATIBLE_OBJECTS);
1187 ctx = new_ctx = BN_CTX_new();
1195 generator = EC_GROUP_get0_generator(group);
1196 if (generator == NULL) {
1197 ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_UNDEFINED_GENERATOR);
1201 /* look if we can use precomputed multiples of generator */
1203 EC_EX_DATA_get_data(group->extra_data, ecp_nistz256_pre_comp_dup,
1204 ecp_nistz256_pre_comp_free,
1205 ecp_nistz256_pre_comp_clear_free);
        /*
         * If there is a precomputed table for the generator, check that it
         * was generated with the same generator.
         */
1212 EC_POINT *pre_comp_generator = EC_POINT_new(group);
1213 if (pre_comp_generator == NULL)
1216 if (!ecp_nistz256_set_from_affine
1217 (pre_comp_generator, group, pre_comp->precomp[0], ctx)) {
1218 EC_POINT_free(pre_comp_generator);
1222 if (0 == EC_POINT_cmp(group, generator, pre_comp_generator, ctx))
1223 preComputedTable = (const PRECOMP256_ROW *)pre_comp->precomp;
1225 EC_POINT_free(pre_comp_generator);
1228 if (preComputedTable == NULL && ecp_nistz256_is_affine_G(generator)) {
        /*
         * If there is no precomputed data, but the generator is the default,
         * a hardcoded table of precomputed data is used. This is because
         * applications, such as Apache, do not use EC_KEY_precompute_mult.
         */
1235 preComputedTable = (const PRECOMP256_ROW *)ecp_nistz256_precomputed;
1238 if (preComputedTable) {
1239 if ((BN_num_bits(scalar) > 256)
1240 || BN_is_negative(scalar)) {
1241 if ((tmp_scalar = BN_CTX_get(ctx)) == NULL)
1244 if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx)) {
1245 ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_BN_LIB);
1248 scalar = tmp_scalar;
1251 for (i = 0; i < scalar->top * BN_BYTES; i += BN_BYTES) {
1252 BN_ULONG d = scalar->d[i / BN_BYTES];
1254 p_str[i + 0] = d & 0xff;
1255 p_str[i + 1] = (d >> 8) & 0xff;
1256 p_str[i + 2] = (d >> 16) & 0xff;
1257 p_str[i + 3] = (d >>= 24) & 0xff;
1258 if (BN_BYTES == 8) {
1260 p_str[i + 4] = d & 0xff;
1261 p_str[i + 5] = (d >> 8) & 0xff;
1262 p_str[i + 6] = (d >> 16) & 0xff;
1263 p_str[i + 7] = (d >> 24) & 0xff;
1270 #if defined(ECP_NISTZ256_AVX2)
1271 if (ecp_nistz_avx2_eligible()) {
1272 ecp_nistz256_avx2_mul_g(&p.p, p_str, preComputedTable);
1277 wvalue = (p_str[0] << 1) & mask;
1278 index += window_size;
1280 wvalue = _booth_recode_w7(wvalue);
1282 ecp_nistz256_select_w7(&p.a, preComputedTable[0], wvalue >> 1);
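        /*
         * The table holds affine points, so conditionally negate Y according
         * to the recoded sign (p.p.Z is used as scratch here) and then set Z
         * to 1 in Montgomery form.
         */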
1284 ecp_nistz256_neg(p.p.Z, p.p.Y);
1285 copy_conditional(p.p.Y, p.p.Z, wvalue & 1);
1287 memcpy(p.p.Z, ONE, sizeof(ONE));
1289 for (i = 1; i < 37; i++) {
1290 unsigned int off = (index - 1) / 8;
1291 wvalue = p_str[off] | p_str[off + 1] << 8;
1292 wvalue = (wvalue >> ((index - 1) % 8)) & mask;
1293 index += window_size;
1295 wvalue = _booth_recode_w7(wvalue);
1297 ecp_nistz256_select_w7(&t.a,
1298 preComputedTable[i], wvalue >> 1);
1300 ecp_nistz256_neg(t.p.Z, t.a.Y);
1301 copy_conditional(t.a.Y, t.p.Z, wvalue & 1);
1303 ecp_nistz256_point_add_affine(&p.p, &p.p, &t.a);
1308 no_precomp_for_generator = 1;
1313 if (no_precomp_for_generator) {
        /*
         * Without a precomputed table for the generator, it has to be
         * handled like a normal point.
         */
1318 new_scalars = OPENSSL_malloc((num + 1) * sizeof(BIGNUM *));
1320 ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
1324 new_points = OPENSSL_malloc((num + 1) * sizeof(EC_POINT *));
1326 ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
1330 memcpy(new_scalars, scalars, num * sizeof(BIGNUM *));
1331 new_scalars[num] = scalar;
1332 memcpy(new_points, points, num * sizeof(EC_POINT *));
1333 new_points[num] = generator;
1335 scalars = new_scalars;
1336 points = new_points;
1341 P256_POINT *out = &t.p;
1345 if (!ecp_nistz256_windowed_mul(group, out, scalars, points, num, ctx))
1349 ecp_nistz256_point_add(&p.p, &p.p, out);
1352 /* Not constant-time, but we're only operating on the public output. */
1353 if (!ecp_nistz256_set_words(&r->X, p.p.X) ||
1354 !ecp_nistz256_set_words(&r->Y, p.p.Y) ||
1355 !ecp_nistz256_set_words(&r->Z, p.p.Z)) {
1358 r->Z_is_one = is_one(&r->Z) & 1;
1365 BN_CTX_free(new_ctx);
1367 OPENSSL_free(new_points);
1369 OPENSSL_free(new_scalars);
1373 static int ecp_nistz256_get_affine(const EC_GROUP *group,
1374 const EC_POINT *point,
1375 BIGNUM *x, BIGNUM *y, BN_CTX *ctx)
1377 BN_ULONG z_inv2[P256_LIMBS];
1378 BN_ULONG z_inv3[P256_LIMBS];
1379 BN_ULONG x_aff[P256_LIMBS];
1380 BN_ULONG y_aff[P256_LIMBS];
1381 BN_ULONG point_x[P256_LIMBS], point_y[P256_LIMBS], point_z[P256_LIMBS];
1382 BN_ULONG x_ret[P256_LIMBS], y_ret[P256_LIMBS];
1384 if (EC_POINT_is_at_infinity(group, point)) {
1385 ECerr(EC_F_ECP_NISTZ256_GET_AFFINE, EC_R_POINT_AT_INFINITY);
1389 if (!ecp_nistz256_bignum_to_field_elem(point_x, &point->X) ||
1390 !ecp_nistz256_bignum_to_field_elem(point_y, &point->Y) ||
1391 !ecp_nistz256_bignum_to_field_elem(point_z, &point->Z)) {
1392 ECerr(EC_F_ECP_NISTZ256_GET_AFFINE, EC_R_COORDINATES_OUT_OF_RANGE);
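    /* Convert to affine: x = X/Z^2 and y = Y/Z^3, all in the Montgomery domain. */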
1396 ecp_nistz256_mod_inverse(z_inv3, point_z);
1397 ecp_nistz256_sqr_mont(z_inv2, z_inv3);
1398 ecp_nistz256_mul_mont(x_aff, z_inv2, point_x);
1401 ecp_nistz256_from_mont(x_ret, x_aff);
1402 if (!ecp_nistz256_set_words(x, x_ret))
1407 ecp_nistz256_mul_mont(z_inv3, z_inv3, z_inv2);
1408 ecp_nistz256_mul_mont(y_aff, z_inv3, point_y);
1409 ecp_nistz256_from_mont(y_ret, y_aff);
1410 if (!ecp_nistz256_set_words(y, y_ret))
1417 static EC_PRE_COMP *ecp_nistz256_pre_comp_new(const EC_GROUP *group)
1419 EC_PRE_COMP *ret = NULL;
1424 ret = (EC_PRE_COMP *)OPENSSL_malloc(sizeof(EC_PRE_COMP));
1427 ECerr(EC_F_ECP_NISTZ256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
1432 ret->w = 6; /* default */
1433 ret->precomp = NULL;
1434 ret->precomp_storage = NULL;
1435 ret->references = 1;
1439 static void *ecp_nistz256_pre_comp_dup(void *src_)
1441 EC_PRE_COMP *src = src_;
1443 /* no need to actually copy, these objects never change! */
1444 CRYPTO_add(&src->references, 1, CRYPTO_LOCK_EC_PRE_COMP);
1449 static void ecp_nistz256_pre_comp_free(void *pre_)
1452 EC_PRE_COMP *pre = pre_;
1457 i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
1461 if (pre->precomp_storage)
1462 OPENSSL_free(pre->precomp_storage);
1467 static void ecp_nistz256_pre_comp_clear_free(void *pre_)
1470 EC_PRE_COMP *pre = pre_;
1475 i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
1479 if (pre->precomp_storage) {
1480 OPENSSL_cleanse(pre->precomp,
1481 32 * sizeof(unsigned char) * (1 << pre->w) * 2 * 37);
1482 OPENSSL_free(pre->precomp_storage);
1484 OPENSSL_cleanse(pre, sizeof *pre);
1488 static int ecp_nistz256_window_have_precompute_mult(const EC_GROUP *group)
1490 /* There is a hard-coded table for the default generator. */
1491 const EC_POINT *generator = EC_GROUP_get0_generator(group);
1492 if (generator != NULL && ecp_nistz256_is_affine_G(generator)) {
1493 /* There is a hard-coded table for the default generator. */
1497 return EC_EX_DATA_get_data(group->extra_data, ecp_nistz256_pre_comp_dup,
1498 ecp_nistz256_pre_comp_free,
1499 ecp_nistz256_pre_comp_clear_free) != NULL;
1502 const EC_METHOD *EC_GFp_nistz256_method(void)
1504 static const EC_METHOD ret = {
1505 EC_FLAGS_DEFAULT_OCT,
1506 NID_X9_62_prime_field,
1507 ec_GFp_mont_group_init,
1508 ec_GFp_mont_group_finish,
1509 ec_GFp_mont_group_clear_finish,
1510 ec_GFp_mont_group_copy,
1511 ec_GFp_mont_group_set_curve,
1512 ec_GFp_simple_group_get_curve,
1513 ec_GFp_simple_group_get_degree,
1514 ec_GFp_simple_group_check_discriminant,
1515 ec_GFp_simple_point_init,
1516 ec_GFp_simple_point_finish,
1517 ec_GFp_simple_point_clear_finish,
1518 ec_GFp_simple_point_copy,
1519 ec_GFp_simple_point_set_to_infinity,
1520 ec_GFp_simple_set_Jprojective_coordinates_GFp,
1521 ec_GFp_simple_get_Jprojective_coordinates_GFp,
1522 ec_GFp_simple_point_set_affine_coordinates,
1523 ecp_nistz256_get_affine,
1527 ec_GFp_simple_invert,
1528 ec_GFp_simple_is_at_infinity,
1529 ec_GFp_simple_is_on_curve,
1531 ec_GFp_simple_make_affine,
1532 ec_GFp_simple_points_make_affine,
1533 ecp_nistz256_points_mul, /* mul */
1534 ecp_nistz256_mult_precompute, /* precompute_mult */
1535 ecp_nistz256_window_have_precompute_mult, /* have_precompute_mult */
1536 ec_GFp_mont_field_mul,
1537 ec_GFp_mont_field_sqr,
1539 ec_GFp_mont_field_encode,
1540 ec_GFp_mont_field_decode,
1541 ec_GFp_mont_field_set_to_one