2 * Copyright 2010-2018 The OpenSSL Project Authors. All Rights Reserved.
4 * Licensed under the OpenSSL license (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
10 /* Copyright 2011 Google Inc.
12 * Licensed under the Apache License, Version 2.0 (the "License");
14 * you may not use this file except in compliance with the License.
15 * You may obtain a copy of the License at
17 * http://www.apache.org/licenses/LICENSE-2.0
19 * Unless required by applicable law or agreed to in writing, software
20 * distributed under the License is distributed on an "AS IS" BASIS,
21 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22 * See the License for the specific language governing permissions and
23 * limitations under the License.
27 * A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
29 * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
30 * and Adam Langley's public domain 64-bit C implementation of curve25519
33 #include <openssl/opensslconf.h>
34 #ifdef OPENSSL_NO_EC_NISTP_64_GCC_128
35 NON_EMPTY_TRANSLATION_UNIT
40 # include <openssl/err.h>
43 # if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__==16
44 /* even with gcc, the typedef won't work for 32-bit platforms */
45 typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit
48 # error "Need GCC 4.0 or later to define type uint128_t"
54 /******************************************************************************/
56 * INTERNAL REPRESENTATION OF FIELD ELEMENTS
58 * Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3
59 * using 64-bit coefficients called 'limbs',
60 * and sometimes (for multiplication results) as
61 * b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + 2^336*b_6
62 * using 128-bit coefficients called 'widelimbs'.
63 * A 4-limb representation is an 'felem';
64 * a 7-widelimb representation is a 'widefelem'.
65 * Even within felems, bits of adjacent limbs overlap, and we don't always
66 * reduce the representations: we ensure that inputs to each felem
67 * multiplication satisfy a_i < 2^60, so outputs satisfy b_i < 4*2^60*2^60,
68 * and fit into a 128-bit word without overflow. The coefficients are then
69 * again partially reduced to obtain an felem satisfying a_i < 2^57.
70 * We only reduce to the unique minimal representation at the end of the
/* One coefficient of a field element: 56 significant bits in a 64-bit word. */
74 typedef uint64_t limb;
/* One unreduced product coefficient: up to ~122 bits in a 128-bit word. */
75 typedef uint128_t widelimb;
/* felem: a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3 (see representation notes above). */
77 typedef limb felem[4];
/* widefelem: 7 widelimbs, the unreduced result of an felem multiplication. */
78 typedef widelimb widefelem[7];
81 * Field element represented as a byte array. 28*8 = 224 bits is also the
82 * group order size for the elliptic curve, and we also use this type for
83 * scalars for point multiplication.
85 typedef u8 felem_bytearray[28];
/*
 * NIST P-224 domain parameters as 28-byte big-endian strings:
 * prime p = 2^224 - 2^96 + 1, curve coefficients a and b, and the
 * generator coordinates x, y (order matches the inline markers).
 */
87 static const felem_bytearray nistp224_curve_params[5] = {
88 {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* p */
89 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
91 {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* a */
92 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
93 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE},
94 {0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, /* b */
95 0x32, 0x56, 0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA,
96 0x27, 0x0B, 0x39, 0x43, 0x23, 0x55, 0xFF, 0xB4},
97 {0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, /* x */
98 0x90, 0xB9, 0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22,
99 0x34, 0x32, 0x80, 0xD6, 0x11, 0x5C, 0x1D, 0x21},
100 {0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, /* y */
101 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
102 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34}
106 * Precomputed multiples of the standard generator
107 * Points are given in coordinates (X, Y, Z) where Z normally is 1
108 * (0 for the point at infinity).
109 * For each field element, slice a_0 is word 0, etc.
111 * The table has 2 * 16 elements, starting with the following:
112 * index | bits | point
113 * ------+---------+------------------------------
116 * 2 | 0 0 1 0 | 2^56G
117 * 3 | 0 0 1 1 | (2^56 + 1)G
118 * 4 | 0 1 0 0 | 2^112G
119 * 5 | 0 1 0 1 | (2^112 + 1)G
120 * 6 | 0 1 1 0 | (2^112 + 2^56)G
121 * 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G
122 * 8 | 1 0 0 0 | 2^168G
123 * 9 | 1 0 0 1 | (2^168 + 1)G
124 * 10 | 1 0 1 0 | (2^168 + 2^56)G
125 * 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G
126 * 12 | 1 1 0 0 | (2^168 + 2^112)G
127 * 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G
128 * 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G
129 * 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G
130 * followed by a copy of this with each element multiplied by 2^28.
132 * The reason for this is so that we can clock bits into four different
133 * locations when doing simple scalar multiplies against the base point,
134 * and then another four locations using the second 16 elements.
/*
 * Precomputed generator multiples; layout is described in the comment
 * above. Each visible pair of limb rows is the (X, Y) of one point,
 * limbs least-significant first (slice a_0 is word 0).
 */
136 static const felem gmul[2][16][3] = {
140 {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf},
141 {0xd5819985007e34, 0x75a05a07476444, 0xfb4c22dfe6cd43, 0xbd376388b5f723},
143 {{0xfd9675666ebbe9, 0xbca7664d40ce5e, 0x2242df8d8a2a43, 0x1f49bbb0f99bc5},
144 {0x29e0b892dc9c43, 0xece8608436e662, 0xdc858f185310d0, 0x9812dd4eb8d321},
146 {{0x6d3e678d5d8eb8, 0x559eed1cb362f1, 0x16e9a3bbce8a3f, 0xeedcccd8c2a748},
147 {0xf19f90ed50266d, 0xabf2b4bf65f9df, 0x313865468fafec, 0x5cb379ba910a17},
149 {{0x0641966cab26e3, 0x91fb2991fab0a0, 0xefec27a4e13a0b, 0x0499aa8a5f8ebe},
150 {0x7510407766af5d, 0x84d929610d5450, 0x81d77aae82f706, 0x6916f6d4338c5b},
152 {{0xea95ac3b1f15c6, 0x086000905e82d4, 0xdd323ae4d1c8b1, 0x932b56be7685a3},
153 {0x9ef93dea25dbbf, 0x41665960f390f0, 0xfdec76dbe2a8a7, 0x523e80f019062a},
155 {{0x822fdd26732c73, 0xa01c83531b5d0f, 0x363f37347c1ba4, 0xc391b45c84725c},
156 {0xbbd5e1b2d6ad24, 0xddfbcde19dfaec, 0xc393da7e222a7f, 0x1efb7890ede244},
158 {{0x4c9e90ca217da1, 0xd11beca79159bb, 0xff8d33c2c98b7c, 0x2610b39409f849},
159 {0x44d1352ac64da0, 0xcdbb7b2c46b4fb, 0x966c079b753c89, 0xfe67e4e820b112},
161 {{0xe28cae2df5312d, 0xc71b61d16f5c6e, 0x79b7619a3e7c4c, 0x05c73240899b47},
162 {0x9f7f6382c73e3a, 0x18615165c56bda, 0x641fab2116fd56, 0x72855882b08394},
164 {{0x0469182f161c09, 0x74a98ca8d00fb5, 0xb89da93489a3e0, 0x41c98768fb0c1d},
165 {0xe5ea05fb32da81, 0x3dce9ffbca6855, 0x1cfe2d3fbf59e6, 0x0e5e03408738a7},
167 {{0xdab22b2333e87f, 0x4430137a5dd2f6, 0xe03ab9f738beb8, 0xcb0c5d0dc34f24},
168 {0x764a7df0c8fda5, 0x185ba5c3fa2044, 0x9281d688bcbe50, 0xc40331df893881},
170 {{0xb89530796f0f60, 0xade92bd26909a3, 0x1a0c83fb4884da, 0x1765bf22a5a984},
171 {0x772a9ee75db09e, 0x23bc6c67cec16f, 0x4c1edba8b14e2f, 0xe2a215d9611369},
173 {{0x571e509fb5efb3, 0xade88696410552, 0xc8ae85fada74fe, 0x6c7e4be83bbde3},
174 {0xff9f51160f4652, 0xb47ce2495a6539, 0xa2946c53b582f4, 0x286d2db3ee9a60},
176 {{0x40bbd5081a44af, 0x0995183b13926c, 0xbcefba6f47f6d0, 0x215619e9cc0057},
177 {0x8bc94d3b0df45e, 0xf11c54a3694f6f, 0x8631b93cdfe8b5, 0xe7e3f4b0982db9},
179 {{0xb17048ab3e1c7b, 0xac38f36ff8a1d8, 0x1c29819435d2c6, 0xc813132f4c07e9},
180 {0x2891425503b11f, 0x08781030579fea, 0xf5426ba5cc9674, 0x1e28ebf18562bc},
182 {{0x9f31997cc864eb, 0x06cd91d28b5e4c, 0xff17036691a973, 0xf1aef351497c58},
183 {0xdd1f2d600564ff, 0xdead073b1402db, 0x74a684435bd693, 0xeea7471f962558},
/* Second sub-table: the same multiples, each scaled by 2^28 (see note above). */
188 {{0x9665266dddf554, 0x9613d78b60ef2d, 0xce27a34cdba417, 0xd35ab74d6afc31},
189 {0x85ccdd22deb15e, 0x2137e5783a6aab, 0xa141cffd8c93c6, 0x355a1830e90f2d},
191 {{0x1a494eadaade65, 0xd6da4da77fe53c, 0xe7992996abec86, 0x65c3553c6090e3},
192 {0xfa610b1fb09346, 0xf1c6540b8a4aaf, 0xc51a13ccd3cbab, 0x02995b1b18c28a},
194 {{0x7874568e7295ef, 0x86b419fbe38d04, 0xdc0690a7550d9a, 0xd3966a44beac33},
195 {0x2b7280ec29132f, 0xbeaa3b6a032df3, 0xdc7dd88ae41200, 0xd25e2513e3a100},
197 {{0x924857eb2efafd, 0xac2bce41223190, 0x8edaa1445553fc, 0x825800fd3562d5},
198 {0x8d79148ea96621, 0x23a01c3dd9ed8d, 0xaf8b219f9416b5, 0xd8db0cc277daea},
200 {{0x76a9c3b1a700f0, 0xe9acd29bc7e691, 0x69212d1a6b0327, 0x6322e97fe154be},
201 {0x469fc5465d62aa, 0x8d41ed18883b05, 0x1f8eae66c52b88, 0xe4fcbe9325be51},
203 {{0x825fdf583cac16, 0x020b857c7b023a, 0x683c17744b0165, 0x14ffd0a2daf2f1},
204 {0x323b36184218f9, 0x4944ec4e3b47d4, 0xc15b3080841acf, 0x0bced4b01a28bb},
206 {{0x92ac22230df5c4, 0x52f33b4063eda8, 0xcb3f19870c0c93, 0x40064f2ba65233},
207 {0xfe16f0924f8992, 0x012da25af5b517, 0x1a57bb24f723a6, 0x06f8bc76760def},
209 {{0x4a7084f7817cb9, 0xbcab0738ee9a78, 0x3ec11e11d9c326, 0xdc0fe90e0f1aae},
210 {0xcf639ea5f98390, 0x5c350aa22ffb74, 0x9afae98a4047b7, 0x956ec2d617fc45},
212 {{0x4306d648c1be6a, 0x9247cd8bc9a462, 0xf5595e377d2f2e, 0xbd1c3caff1a52e},
213 {0x045e14472409d0, 0x29f3e17078f773, 0x745a602b2d4f7d, 0x191837685cdfbb},
215 {{0x5b6ee254a8cb79, 0x4953433f5e7026, 0xe21faeb1d1def4, 0xc4c225785c09de},
216 {0x307ce7bba1e518, 0x31b125b1036db8, 0x47e91868839e8f, 0xc765866e33b9f3},
218 {{0x3bfece24f96906, 0x4794da641e5093, 0xde5df64f95db26, 0x297ecd89714b05},
219 {0x701bd3ebb2c3aa, 0x7073b4f53cb1d5, 0x13c5665658af16, 0x9895089d66fe58},
221 {{0x0fef05f78c4790, 0x2d773633b05d2e, 0x94229c3a951c94, 0xbbbd70df4911bb},
222 {0xb2c6963d2c1168, 0x105f47a72b0d73, 0x9fdf6111614080, 0x7b7e94b39e67b0},
224 {{0xad1a7d6efbe2b3, 0xf012482c0da69d, 0x6b3bdf12438345, 0x40d7558d7aa4d9},
225 {0x8a09fffb5c6d3d, 0x9a356e5d9ffd38, 0x5973f15f4f9b1c, 0xdcd5f59f63c3ea},
227 {{0xacf39f4c5ca7ab, 0x4c8071cc5fd737, 0xc64e3602cd1184, 0x0acd4644c9abba},
228 {0x6c011a36d8bf6e, 0xfecd87ba24e32a, 0x19f6f56574fad8, 0x050b204ced9405},
230 {{0xed4f1cae7d9a96, 0x5ceef7ad94c40a, 0x778e4a3bf3ef9b, 0x7405783dc3b55e},
231 {0x32477c61b6e8c6, 0xb46a97570f018b, 0x91176d0a7e95d1, 0x3df90fbc4c7d0e},
235 /* Precomputation for the group generator. */
236 struct nistp224_pre_comp_st {
    /* Table of generator multiples, same layout as the static gmul table. */
237 felem g_pre_comp[2][16][3];
    /* Reference count for sharing this precomputation between groups. */
238 CRYPTO_REF_COUNT references;
/*
 * Return the method table that wires this nistp224 implementation into
 * the generic EC_GFp framework.  nistp224-specific entry points are used
 * for group init/set_curve, affine-coordinate recovery, and point
 * multiplication; everything else falls back to the simple/nist
 * generic implementations.
 */
242 const EC_METHOD *EC_GFp_nistp224_method(void)
244 static const EC_METHOD ret = {
245 EC_FLAGS_DEFAULT_OCT,
246 NID_X9_62_prime_field,
        /* group lifecycle */
247 ec_GFp_nistp224_group_init,
248 ec_GFp_simple_group_finish,
249 ec_GFp_simple_group_clear_finish,
250 ec_GFp_nist_group_copy,
251 ec_GFp_nistp224_group_set_curve,
252 ec_GFp_simple_group_get_curve,
253 ec_GFp_simple_group_get_degree,
254 ec_group_simple_order_bits,
255 ec_GFp_simple_group_check_discriminant,
        /* point lifecycle and coordinate handling */
256 ec_GFp_simple_point_init,
257 ec_GFp_simple_point_finish,
258 ec_GFp_simple_point_clear_finish,
259 ec_GFp_simple_point_copy,
260 ec_GFp_simple_point_set_to_infinity,
261 ec_GFp_simple_set_Jprojective_coordinates_GFp,
262 ec_GFp_simple_get_Jprojective_coordinates_GFp,
263 ec_GFp_simple_point_set_affine_coordinates,
264 ec_GFp_nistp224_point_get_affine_coordinates,
265 0 /* point_set_compressed_coordinates */ ,
270 ec_GFp_simple_invert,
271 ec_GFp_simple_is_at_infinity,
272 ec_GFp_simple_is_on_curve,
274 ec_GFp_simple_make_affine,
275 ec_GFp_simple_points_make_affine,
        /* scalar multiplication with precomputation */
276 ec_GFp_nistp224_points_mul,
277 ec_GFp_nistp224_precompute_mult,
278 ec_GFp_nistp224_have_precompute_mult,
279 ec_GFp_nist_field_mul,
280 ec_GFp_nist_field_sqr,
282 0 /* field_encode */ ,
283 0 /* field_decode */ ,
284 0, /* field_set_to_one */
285 ec_key_simple_priv2oct,
286 ec_key_simple_oct2priv,
288 ec_key_simple_generate_key,
289 ec_key_simple_check_key,
290 ec_key_simple_generate_public_key,
293 ecdh_simple_compute_key
300 * Helper functions to convert field elements to/from internal representation
302 static void bin28_to_felem(felem out, const u8 in[28])
304 out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
305 out[1] = (*((const uint64_t *)(in + 7))) & 0x00ffffffffffffff;
306 out[2] = (*((const uint64_t *)(in + 14))) & 0x00ffffffffffffff;
307 out[3] = (*((const uint64_t *)(in+20))) >> 8;
310 static void felem_to_bin28(u8 out[28], const felem in)
313 for (i = 0; i < 7; ++i) {
314 out[i] = in[0] >> (8 * i);
315 out[i + 7] = in[1] >> (8 * i);
316 out[i + 14] = in[2] >> (8 * i);
317 out[i + 21] = in[3] >> (8 * i);
321 /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
322 static void flip_endian(u8 *out, const u8 *in, unsigned len)
325 for (i = 0; i < len; ++i)
326 out[i] = in[len - 1 - i];
329 /* From OpenSSL BIGNUM to internal representation */
330 static int BN_to_felem(felem out, const BIGNUM *bn)
332 felem_bytearray b_in;
333 felem_bytearray b_out;
336 /* BN_bn2bin eats leading zeroes */
337 memset(b_out, 0, sizeof(b_out));
338 num_bytes = BN_num_bytes(bn);
    /* Reject values that do not fit in 28 bytes ... */
339 if (num_bytes > sizeof(b_out)) {
340 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
    /* ... and negative values; the internal representation is unsigned. */
343 if (BN_is_negative(bn)) {
344 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
    /* BN_bn2bin writes big-endian bytes; flip to the little-endian layout. */
347 num_bytes = BN_bn2bin(bn, b_in);
348 flip_endian(b_out, b_in, num_bytes);
349 bin28_to_felem(out, b_out);
353 /* From internal representation to OpenSSL BIGNUM */
354 static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
356 felem_bytearray b_in, b_out;
    /* Serialize little-endian, then flip to the big-endian order BN_bin2bn expects. */
357 felem_to_bin28(b_in, in);
358 flip_endian(b_out, b_in, sizeof(b_out));
359 return BN_bin2bn(b_out, sizeof(b_out), out);
362 /******************************************************************************/
366 * Field operations, using the internal representation of field elements.
367 * NB! These operations are specific to our point multiplication and cannot be
368 * expected to be correct in general - e.g., multiplication with a large scalar
369 * will cause an overflow.
373 static void felem_one(felem out)
381 static void felem_assign(felem out, const felem in)
389 /* Sum two field elements: out += in */
390 static void felem_sum(felem out, const felem in)
398 /* Subtract field elements: out -= in */
399 /* Assumes in[i] < 2^57 */
400 static void felem_diff(felem out, const felem in)
    /* These constants encode a multiple of p = 2^224 - 2^96 + 1 spread over
     * the limbs; adding them before subtracting keeps every limb
     * non-negative without changing the value mod p. */
402 static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
403 static const limb two58m2 = (((limb) 1) << 58) - (((limb) 1) << 2);
404 static const limb two58m42m2 = (((limb) 1) << 58) -
405 (((limb) 1) << 42) - (((limb) 1) << 2);
407 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
409 out[1] += two58m42m2;
419 /* Subtract in unreduced 128-bit mode: out -= in */
420 /* Assumes in[i] < 2^119 */
421 static void widefelem_diff(widefelem out, const widefelem in)
    /* Multiple of p in widelimb form, added first so that every 128-bit
     * coefficient stays non-negative after the subtraction. */
423 static const widelimb two120 = ((widelimb) 1) << 120;
424 static const widelimb two120m64 = (((widelimb) 1) << 120) -
425 (((widelimb) 1) << 64);
426 static const widelimb two120m104m64 = (((widelimb) 1) << 120) -
427 (((widelimb) 1) << 104) - (((widelimb) 1) << 64);
429 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
434 out[4] += two120m104m64;
447 /* Subtract in mixed mode: out128 -= in64 */
449 static void felem_diff_128_64(widefelem out, const felem in)
    /* Multiple of p sized for subtracting a reduced (64-bit-limb) element
     * from an unreduced (128-bit-limb) one without underflow. */
451 static const widelimb two64p8 = (((widelimb) 1) << 64) +
452 (((widelimb) 1) << 8);
453 static const widelimb two64m8 = (((widelimb) 1) << 64) -
454 (((widelimb) 1) << 8);
455 static const widelimb two64m48m8 = (((widelimb) 1) << 64) -
456 (((widelimb) 1) << 48) - (((widelimb) 1) << 8);
458 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
460 out[1] += two64m48m8;
471 * Multiply a field element by a scalar: out = out * scalar The scalars we
472 * actually use are small, so results fit without overflow
474 static void felem_scalar(felem out, const limb scalar)
483 * Multiply an unreduced field element by a scalar: out = out * scalar The
484 * scalars we actually use are small, so results fit without overflow
486 static void widefelem_scalar(widefelem out, const widelimb scalar)
497 /* Square a field element: out = in^2 */
498 static void felem_square(widefelem out, const felem in)
    /* tmp0..tmp2 fold the symmetric cross terms: from the products below
     * (e.g. out[1] = in[0]*tmp1 must equal 2*in[0]*in[1]) they hold the
     * doubled limbs 2*in[0], 2*in[1], 2*in[2]. */
500 limb tmp0, tmp1, tmp2;
    /* Coefficient k of the result collects all in[i]*in[j] with i+j == k. */
504 out[0] = ((widelimb) in[0]) * in[0];
505 out[1] = ((widelimb) in[0]) * tmp1;
506 out[2] = ((widelimb) in[0]) * tmp2 + ((widelimb) in[1]) * in[1];
507 out[3] = ((widelimb) in[3]) * tmp0 + ((widelimb) in[1]) * tmp2;
508 out[4] = ((widelimb) in[3]) * tmp1 + ((widelimb) in[2]) * in[2];
509 out[5] = ((widelimb) in[3]) * tmp2;
510 out[6] = ((widelimb) in[3]) * in[3];
513 /* Multiply two field elements: out = in1 * in2 */
    /* Schoolbook 4x4 multiplication: coefficient k of the unreduced result
     * collects every product in1[i]*in2[j] with i+j == k. */
514 static void felem_mul(widefelem out, const felem in1, const felem in2)
516 out[0] = ((widelimb) in1[0]) * in2[0];
517 out[1] = ((widelimb) in1[0]) * in2[1] + ((widelimb) in1[1]) * in2[0];
518 out[2] = ((widelimb) in1[0]) * in2[2] + ((widelimb) in1[1]) * in2[1] +
519 ((widelimb) in1[2]) * in2[0];
520 out[3] = ((widelimb) in1[0]) * in2[3] + ((widelimb) in1[1]) * in2[2] +
521 ((widelimb) in1[2]) * in2[1] + ((widelimb) in1[3]) * in2[0];
522 out[4] = ((widelimb) in1[1]) * in2[3] + ((widelimb) in1[2]) * in2[2] +
523 ((widelimb) in1[3]) * in2[1];
524 out[5] = ((widelimb) in1[2]) * in2[3] + ((widelimb) in1[3]) * in2[2];
525 out[6] = ((widelimb) in1[3]) * in2[3];
529 * Reduce seven 128-bit coefficients to four 64-bit coefficients.
530 * Requires in[i] < 2^126,
531 * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */
532 static void felem_reduce(felem out, const widefelem in)
    /* Multiples of p added up-front so that all the eliminations below
     * subtract from non-negative coefficients. */
534 static const widelimb two127p15 = (((widelimb) 1) << 127) +
535 (((widelimb) 1) << 15);
536 static const widelimb two127m71 = (((widelimb) 1) << 127) -
537 (((widelimb) 1) << 71);
538 static const widelimb two127m71m55 = (((widelimb) 1) << 127) -
539 (((widelimb) 1) << 71) - (((widelimb) 1) << 55);
542 /* Add 0 mod 2^224-2^96+1 to ensure all differences are positive */
543 output[0] = in[0] + two127p15;
544 output[1] = in[1] + two127m71m55;
545 output[2] = in[2] + two127m71;
    /* Fold the high coefficients downward using
     * 2^224 == 2^96 - 1 (mod p): each high limb splits into a shifted
     * addition and a subtraction from output[0]. */
549 /* Eliminate in[4], in[5], in[6] */
550 output[4] += in[6] >> 16;
551 output[3] += (in[6] & 0xffff) << 40;
554 output[3] += in[5] >> 16;
555 output[2] += (in[5] & 0xffff) << 40;
558 output[2] += output[4] >> 16;
559 output[1] += (output[4] & 0xffff) << 40;
560 output[0] -= output[4];
562 /* Carry 2 -> 3 -> 4 */
563 output[3] += output[2] >> 56;
564 output[2] &= 0x00ffffffffffffff;
566 output[4] = output[3] >> 56;
567 output[3] &= 0x00ffffffffffffff;
569 /* Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 */
571 /* Eliminate output[4] */
572 output[2] += output[4] >> 16;
573 /* output[2] < 2^56 + 2^56 = 2^57 */
574 output[1] += (output[4] & 0xffff) << 40;
575 output[0] -= output[4];
577 /* Carry 0 -> 1 -> 2 -> 3 */
578 output[1] += output[0] >> 56;
579 out[0] = output[0] & 0x00ffffffffffffff;
581 output[2] += output[1] >> 56;
582 /* output[2] < 2^57 + 2^72 */
583 out[1] = output[1] & 0x00ffffffffffffff;
584 output[3] += output[2] >> 56;
585 /* output[3] <= 2^56 + 2^16 */
586 out[2] = output[2] & 0x00ffffffffffffff;
589 * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
590 * out[3] <= 2^56 + 2^16 (due to final carry),
/* Convenience wrapper: out = in^2 mod p, partially reduced (see felem_reduce). */
596 static void felem_square_reduce(felem out, const felem in)
599 felem_square(tmp, in);
600 felem_reduce(out, tmp);
/* Convenience wrapper: out = in1 * in2 mod p, partially reduced. */
603 static void felem_mul_reduce(felem out, const felem in1, const felem in2)
606 felem_mul(tmp, in1, in2);
607 felem_reduce(out, tmp);
611 * Reduce to unique minimal representation. Requires 0 <= in < 2*p (always
612 * call felem_reduce first)
    /* Constant-time: the conditional subtraction of p is done with masks,
     * never with a data-dependent branch. */
614 static void felem_contract(felem out, const felem in)
616 static const int64_t two56 = ((limb) 1) << 56;
617 /* 0 <= in < 2*p, p = 2^224 - 2^96 + 1 */
618 /* if in > p , reduce in = in - 2^224 + 2^96 - 1 */
624 /* Case 1: a = 1 iff in >= 2^224 */
628 tmp[3] &= 0x00ffffffffffffff;
630 * Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1
631 * and the lower part is non-zero
633 a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) |
634 (((int64_t) (in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63);
635 a &= 0x00ffffffffffffff;
636 /* turn a into an all-one mask (if a = 0) or an all-zero mask */
638 /* subtract 2^224 - 2^96 + 1 if a is all-one */
639 tmp[3] &= a ^ 0xffffffffffffffff;
640 tmp[2] &= a ^ 0xffffffffffffffff;
641 tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff;
645 * eliminate negative coefficients: if tmp[0] is negative, tmp[1] must be
646 * non-zero, so we only need one step
652 /* carry 1 -> 2 -> 3 */
653 tmp[2] += tmp[1] >> 56;
654 tmp[1] &= 0x00ffffffffffffff;
656 tmp[3] += tmp[2] >> 56;
657 tmp[2] &= 0x00ffffffffffffff;
659 /* Now 0 <= out < p */
667 * Get negative value: out = -in
668 * Requires in[i] < 2^63,
669 * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16
671 static void felem_neg(felem out, const felem in)
    /* Subtract in from a zero widefelem (the diff routine adds a multiple of
     * p to avoid underflow), then reduce back to felem form. */
674 felem_diff_128_64(tmp, in);
675 felem_reduce(out, tmp);
679 * Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field
680 * elements are reduced to in < 2^225, so we only need to check three cases:
681 * 0, 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2
683 static limb felem_is_zero(const felem in)
685 limb zero, two224m96p1, two225m97p2;
    /* Each test ORs the limbs (or limb XOR expected-constant) together and
     * converts "all bits clear" into 1 via a constant-time sign trick. */
687 zero = in[0] | in[1] | in[2] | in[3];
688 zero = (((int64_t) (zero) - 1) >> 63) & 1;
    /* in == p (the representation 2^224 - 2^96 + 1)? */
689 two224m96p1 = (in[0] ^ 1) | (in[1] ^ 0x00ffff0000000000)
690 | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x00ffffffffffffff);
691 two224m96p1 = (((int64_t) (two224m96p1) - 1) >> 63) & 1;
    /* in == 2*p (the representation 2^225 - 2^97 + 2)? */
692 two225m97p2 = (in[0] ^ 2) | (in[1] ^ 0x00fffe0000000000)
693 | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x01ffffffffffffff);
694 two225m97p2 = (((int64_t) (two225m97p2) - 1) >> 63) & 1;
695 return (zero | two224m96p1 | two225m97p2);
/* void*-typed wrapper around felem_is_zero, usable as a generic callback. */
698 static int felem_is_zero_int(const void *in)
700 return (int)(felem_is_zero(in) & ((limb) 1));
703 /* Invert a field element */
704 /* Computation chain copied from djb's code */
    /* Fermat inversion: out = in^(p-2) with p-2 = 2^224 - 2^96 - 1, built as
     * a fixed addition chain of squarings and multiplications (constant
     * sequence, so constant-time).  The trailing comments track the
     * exponent accumulated so far. */
705 static void felem_inv(felem out, const felem in)
707 felem ftmp, ftmp2, ftmp3, ftmp4;
711 felem_square(tmp, in);
712 felem_reduce(ftmp, tmp); /* 2 */
713 felem_mul(tmp, in, ftmp);
714 felem_reduce(ftmp, tmp); /* 2^2 - 1 */
715 felem_square(tmp, ftmp);
716 felem_reduce(ftmp, tmp); /* 2^3 - 2 */
717 felem_mul(tmp, in, ftmp);
718 felem_reduce(ftmp, tmp); /* 2^3 - 1 */
719 felem_square(tmp, ftmp);
720 felem_reduce(ftmp2, tmp); /* 2^4 - 2 */
721 felem_square(tmp, ftmp2);
722 felem_reduce(ftmp2, tmp); /* 2^5 - 4 */
723 felem_square(tmp, ftmp2);
724 felem_reduce(ftmp2, tmp); /* 2^6 - 8 */
725 felem_mul(tmp, ftmp2, ftmp);
726 felem_reduce(ftmp, tmp); /* 2^6 - 1 */
727 felem_square(tmp, ftmp);
728 felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
729 for (i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
730 felem_square(tmp, ftmp2);
731 felem_reduce(ftmp2, tmp);
733 felem_mul(tmp, ftmp2, ftmp);
734 felem_reduce(ftmp2, tmp); /* 2^12 - 1 */
735 felem_square(tmp, ftmp2);
736 felem_reduce(ftmp3, tmp); /* 2^13 - 2 */
737 for (i = 0; i < 11; ++i) { /* 2^24 - 2^12 */
738 felem_square(tmp, ftmp3);
739 felem_reduce(ftmp3, tmp);
741 felem_mul(tmp, ftmp3, ftmp2);
742 felem_reduce(ftmp2, tmp); /* 2^24 - 1 */
743 felem_square(tmp, ftmp2);
744 felem_reduce(ftmp3, tmp); /* 2^25 - 2 */
745 for (i = 0; i < 23; ++i) { /* 2^48 - 2^24 */
746 felem_square(tmp, ftmp3);
747 felem_reduce(ftmp3, tmp);
749 felem_mul(tmp, ftmp3, ftmp2);
750 felem_reduce(ftmp3, tmp); /* 2^48 - 1 */
751 felem_square(tmp, ftmp3);
752 felem_reduce(ftmp4, tmp); /* 2^49 - 2 */
753 for (i = 0; i < 47; ++i) { /* 2^96 - 2^48 */
754 felem_square(tmp, ftmp4);
755 felem_reduce(ftmp4, tmp);
757 felem_mul(tmp, ftmp3, ftmp4);
758 felem_reduce(ftmp3, tmp); /* 2^96 - 1 */
759 felem_square(tmp, ftmp3);
760 felem_reduce(ftmp4, tmp); /* 2^97 - 2 */
761 for (i = 0; i < 23; ++i) { /* 2^120 - 2^24 */
762 felem_square(tmp, ftmp4);
763 felem_reduce(ftmp4, tmp);
765 felem_mul(tmp, ftmp2, ftmp4);
766 felem_reduce(ftmp2, tmp); /* 2^120 - 1 */
767 for (i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
768 felem_square(tmp, ftmp2);
769 felem_reduce(ftmp2, tmp);
771 felem_mul(tmp, ftmp2, ftmp);
772 felem_reduce(ftmp, tmp); /* 2^126 - 1 */
773 felem_square(tmp, ftmp);
774 felem_reduce(ftmp, tmp); /* 2^127 - 2 */
775 felem_mul(tmp, ftmp, in);
776 felem_reduce(ftmp, tmp); /* 2^127 - 1 */
777 for (i = 0; i < 97; ++i) { /* 2^224 - 2^97 */
778 felem_square(tmp, ftmp);
779 felem_reduce(ftmp, tmp);
781 felem_mul(tmp, ftmp, ftmp3);
782 felem_reduce(out, tmp); /* 2^224 - 2^96 - 1 */
786 * Copy in constant time: if icopy == 1, copy in to out, if icopy == 0, copy
789 static void copy_conditional(felem out, const felem in, limb icopy)
793 * icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one
795 const limb copy = -icopy;
796 for (i = 0; i < 4; ++i) {
    /* XOR-swap trick: tmp is (in[i] ^ out[i]) when selected, 0 otherwise,
     * so folding it into out[i] selects in[i] without a branch. */
797 const limb tmp = copy & (in[i] ^ out[i]);
802 /******************************************************************************/
804 * ELLIPTIC CURVE POINT OPERATIONS
806 * Points are represented in Jacobian projective coordinates:
807 * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
808 * or to the point at infinity if Z == 0.
813 * Double an elliptic curve point:
814 * (X', Y', Z') = 2 * (X, Y, Z), where
815 * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
816 * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^4
817 * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
818 * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
819 * while x_out == y_in is not (maybe this works, but it's not tested).
    /* The inline bound comments track limb magnitudes to show each
     * felem_mul/felem_square input stays below 2^60 (no 128-bit overflow). */
822 point_double(felem x_out, felem y_out, felem z_out,
823 const felem x_in, const felem y_in, const felem z_in)
826 felem delta, gamma, beta, alpha, ftmp, ftmp2;
828 felem_assign(ftmp, x_in);
829 felem_assign(ftmp2, x_in);
    /* delta = z^2 */
832 felem_square(tmp, z_in);
833 felem_reduce(delta, tmp);
    /* gamma = y^2 */
836 felem_square(tmp, y_in);
837 felem_reduce(gamma, tmp);
    /* beta = x * gamma = x * y^2 */
840 felem_mul(tmp, x_in, gamma);
841 felem_reduce(beta, tmp);
843 /* alpha = 3*(x-delta)*(x+delta) */
844 felem_diff(ftmp, delta);
845 /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
846 felem_sum(ftmp2, delta);
847 /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
848 felem_scalar(ftmp2, 3);
849 /* ftmp2[i] < 3 * 2^58 < 2^60 */
850 felem_mul(tmp, ftmp, ftmp2);
851 /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
852 felem_reduce(alpha, tmp);
854 /* x' = alpha^2 - 8*beta */
855 felem_square(tmp, alpha);
856 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
857 felem_assign(ftmp, beta);
858 felem_scalar(ftmp, 8);
859 /* ftmp[i] < 8 * 2^57 = 2^60 */
860 felem_diff_128_64(tmp, ftmp);
861 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
862 felem_reduce(x_out, tmp);
864 /* z' = (y + z)^2 - gamma - delta */
865 felem_sum(delta, gamma);
866 /* delta[i] < 2^57 + 2^57 = 2^58 */
867 felem_assign(ftmp, y_in);
868 felem_sum(ftmp, z_in);
869 /* ftmp[i] < 2^57 + 2^57 = 2^58 */
870 felem_square(tmp, ftmp);
871 /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
872 felem_diff_128_64(tmp, delta);
873 /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
874 felem_reduce(z_out, tmp);
876 /* y' = alpha*(4*beta - x') - 8*gamma^2 */
877 felem_scalar(beta, 4);
878 /* beta[i] < 4 * 2^57 = 2^59 */
879 felem_diff(beta, x_out);
880 /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
881 felem_mul(tmp, alpha, beta);
882 /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
883 felem_square(tmp2, gamma);
884 /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
885 widefelem_scalar(tmp2, 8);
886 /* tmp2[i] < 8 * 2^116 = 2^119 */
887 widefelem_diff(tmp, tmp2);
888 /* tmp[i] < 2^119 + 2^120 < 2^121 */
889 felem_reduce(y_out, tmp);
893 * Add two elliptic curve points:
894 * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
895 * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
896 * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
897 * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - X_3) -
898 * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
899 * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2)
901 * This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0.
905 * This function is not entirely constant-time: it includes a branch for
906 * checking whether the two input points are equal, (while not equal to the
907 * point at infinity). This case never happens during single point
908 * multiplication, so there is no timing leak for ECDH or ECDSA signing.
910 static void point_add(felem x3, felem y3, felem z3,
911 const felem x1, const felem y1, const felem z1,
912 const int mixed, const felem x2, const felem y2,
915 felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, x_out, y_out, z_out;
917 limb z1_is_zero, z2_is_zero, x_equal, y_equal;
    /* ftmp2 = z2^2 */
921 felem_square(tmp, z2);
922 felem_reduce(ftmp2, tmp);
    /* ftmp4 = z2^3 */
925 felem_mul(tmp, ftmp2, z2);
926 felem_reduce(ftmp4, tmp);
928 /* ftmp4 = z2^3*y1 */
929 felem_mul(tmp2, ftmp4, y1);
930 felem_reduce(ftmp4, tmp2);
932 /* ftmp2 = z2^2*x1 */
933 felem_mul(tmp2, ftmp2, x1);
934 felem_reduce(ftmp2, tmp2);
937 * We'll assume z2 = 1 (special case z2 = 0 is handled later)
940 /* ftmp4 = z2^3*y1 */
941 felem_assign(ftmp4, y1);
943 /* ftmp2 = z2^2*x1 */
944 felem_assign(ftmp2, x1);
    /* ftmp = z1^2 */
948 felem_square(tmp, z1);
949 felem_reduce(ftmp, tmp);
    /* ftmp3 = z1^3 */
952 felem_mul(tmp, ftmp, z1);
953 felem_reduce(ftmp3, tmp);
    /* tmp = z1^3*y2 */
956 felem_mul(tmp, ftmp3, y2);
957 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
959 /* ftmp3 = z1^3*y2 - z2^3*y1 */
960 felem_diff_128_64(tmp, ftmp4);
961 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
962 felem_reduce(ftmp3, tmp);
    /* tmp = z1^2*x2 */
965 felem_mul(tmp, ftmp, x2);
966 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
968 /* ftmp = z1^2*x2 - z2^2*x1 */
969 felem_diff_128_64(tmp, ftmp2);
970 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
971 felem_reduce(ftmp, tmp);
974 * the formulae are incorrect if the points are equal so we check for
975 * this and do doubling if this happens
977 x_equal = felem_is_zero(ftmp);
978 y_equal = felem_is_zero(ftmp3);
979 z1_is_zero = felem_is_zero(z1);
980 z2_is_zero = felem_is_zero(z2);
981 /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */
982 if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
983 point_double(x3, y3, z3, x1, y1, z1);
    /* ftmp5 = z1*z2 */
989 felem_mul(tmp, z1, z2);
990 felem_reduce(ftmp5, tmp);
992 /* special case z2 = 0 is handled later */
993 felem_assign(ftmp5, z1);
996 /* z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) */
997 felem_mul(tmp, ftmp, ftmp5);
998 felem_reduce(z_out, tmp);
1000 /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
1001 felem_assign(ftmp5, ftmp);
1002 felem_square(tmp, ftmp);
1003 felem_reduce(ftmp, tmp);
1005 /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
1006 felem_mul(tmp, ftmp, ftmp5);
1007 felem_reduce(ftmp5, tmp);
1009 /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
1010 felem_mul(tmp, ftmp2, ftmp);
1011 felem_reduce(ftmp2, tmp);
1013 /* tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
1014 felem_mul(tmp, ftmp4, ftmp5);
1015 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
1017 /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
1018 felem_square(tmp2, ftmp3);
1019 /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */
1021 /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
1022 felem_diff_128_64(tmp2, ftmp5);
1023 /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */
1025 /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
1026 felem_assign(ftmp5, ftmp2);
1027 felem_scalar(ftmp5, 2);
1028 /* ftmp5[i] < 2 * 2^57 = 2^58 */
1031 * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
1032 * 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
1034 felem_diff_128_64(tmp2, ftmp5);
1035 /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
1036 felem_reduce(x_out, tmp2);
1038 /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out */
1039 felem_diff(ftmp2, x_out);
1040 /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */
1043 * tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out)
1045 felem_mul(tmp2, ftmp3, ftmp2);
1046 /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
1049 * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
1050 * z2^3*y1*(z1^2*x2 - z2^2*x1)^3
1052 widefelem_diff(tmp2, tmp);
1053 /* tmp2[i] < 2^118 + 2^120 < 2^121 */
1054 felem_reduce(y_out, tmp2);
1057 * the result (x_out, y_out, z_out) is incorrect if one of the inputs is
1058 * the point at infinity, so we need to check for this separately
1062 * if point 1 is at infinity, copy point 2 to output, and vice versa
    /* Constant-time fix-up of the infinity cases via masked copies. */
1064 copy_conditional(x_out, x2, z1_is_zero);
1065 copy_conditional(x_out, x1, z2_is_zero);
1066 copy_conditional(y_out, y2, z1_is_zero);
1067 copy_conditional(y_out, y1, z2_is_zero);
1068 copy_conditional(z_out, z2, z1_is_zero);
1069 copy_conditional(z_out, z1, z2_is_zero);
1070 felem_assign(x3, x_out);
1071 felem_assign(y3, y_out);
1072 felem_assign(z3, z_out);
1076 * select_point selects the |idx|th point from a precomputation table and
1078 * The pre_comp array argument should be size of |size| argument
    /* Constant-time table lookup: every entry is scanned and OR-masked into
     * the output, so the memory access pattern is independent of |idx|. */
1080 static void select_point(const u64 idx, unsigned int size,
1081 const felem pre_comp[][3], felem out[3])
1084 limb *outlimbs = &out[0][0];
1086 memset(out, 0, sizeof(*out) * 3);
1087 for (i = 0; i < size; i++) {
1088 const limb *inlimbs = &pre_comp[i][0][0];
        /* NOTE(review): 'mask' is presumably all-ones iff i == idx — its
         * computation is not visible in this excerpt; confirm upstream. */
1095 for (j = 0; j < 4 * 3; j++)
1096 outlimbs[j] |= inlimbs[j] & mask;
1100 /* get_bit returns the |i|th bit in |in| */
/*
 * get_bit: return bit |i| of the little-endian byte array |in|
 * (bit 0 is the least significant bit of in[0]).
 * NOTE(review): lines 1102-1104 are not visible here; a range guard on
 * |i| must exist above this access, since batch_mul calls
 * get_bit(..., i - 1) with i == 0, which wraps to a huge unsigned value
 * -- confirm against the full source.
 */
1101 static char get_bit(const felem_bytearray in, unsigned i)
1105 return (in[i >> 3] >> (i & 7)) & 1;
1109 * Interleaved point multiplication using precomputed point multiples: The
1110 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
1111 * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
1112 * generator, using certain (large) precomputed multiples in g_pre_comp.
1113 * Output point (X, Y, Z) is stored in x_out, y_out, z_out
/*
 * batch_mul: constant-time interleaved multi-scalar multiplication.
 * Computes sum_i scalars[i]*P_i (multiples of each P_i supplied in
 * pre_comp) plus, when g_scalar != NULL, g_scalar*G via the two fixed
 * comb tables g_pre_comp.  The Jacobian result is written to
 * (x_out, y_out, z_out).  Scalars are 224-bit little-endian byte arrays
 * read through get_bit; point selection uses select_point so table
 * accesses do not depend on secret bits.
 */
1115 static void batch_mul(felem x_out, felem y_out, felem z_out,
1116 const felem_bytearray scalars[],
1117 const unsigned num_points, const u8 *g_scalar,
1118 const int mixed, const felem pre_comp[][17][3],
1119 const felem g_pre_comp[2][16][3])
/* gen_mul is nonzero iff a generator multiple must be added */
1123 unsigned gen_mul = (g_scalar != NULL);
1124 felem nq[3], tmp[4];
1128 /* set nq to the point at infinity */
1129 memset(nq, 0, sizeof(nq));
1132 * Loop over all scalars msb-to-lsb, interleaving additions of multiples
1133 * of the generator (two in each of the last 28 rounds) and additions of
1134 * other points multiples (every 5th round).
1136 skip = 1; /* save two point operations in the first
1138 for (i = (num_points ? 220 : 27); i >= 0; --i) {
1141 point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
1143 /* add multiples of the generator */
1144 if (gen_mul && (i <= 27)) {
/*
 * 4-bit comb over the "upper" table: one scalar bit from each
 * 56-bit quarter, offset by 28 bits relative to the comb below.
 */
1145 /* first, look 28 bits upwards */
1146 bits = get_bit(g_scalar, i + 196) << 3;
1147 bits |= get_bit(g_scalar, i + 140) << 2;
1148 bits |= get_bit(g_scalar, i + 84) << 1;
1149 bits |= get_bit(g_scalar, i + 28);
1150 /* select the point to add, in constant time */
1151 select_point(bits, 16, g_pre_comp[1], tmp);
1154 /* value 1 below is argument for "mixed" */
1155 point_add(nq[0], nq[1], nq[2],
1156 nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
/* while nq is still infinity, copy instead of adding (skip case) */
1158 memcpy(nq, tmp, 3 * sizeof(felem));
/* 4-bit comb over the "lower" table, anchored at bit i */
1162 /* second, look at the current position */
1163 bits = get_bit(g_scalar, i + 168) << 3;
1164 bits |= get_bit(g_scalar, i + 112) << 2;
1165 bits |= get_bit(g_scalar, i + 56) << 1;
1166 bits |= get_bit(g_scalar, i);
1167 /* select the point to add, in constant time */
1168 select_point(bits, 16, g_pre_comp[0], tmp);
1169 point_add(nq[0], nq[1], nq[2],
1170 nq[0], nq[1], nq[2],
1171 1 /* mixed */ , tmp[0], tmp[1], tmp[2]);
1174 /* do other additions every 5 doublings */
1175 if (num_points && (i % 5 == 0)) {
1176 /* loop over all scalars */
1177 for (num = 0; num < num_points; ++num) {
/*
 * Signed 6-bit window (bits i-1 .. i+4); the recoding maps it to
 * a digit in [0, 16] plus a sign, so a 17-entry table suffices.
 */
1178 bits = get_bit(scalars[num], i + 4) << 5;
1179 bits |= get_bit(scalars[num], i + 3) << 4;
1180 bits |= get_bit(scalars[num], i + 2) << 3;
1181 bits |= get_bit(scalars[num], i + 1) << 2;
1182 bits |= get_bit(scalars[num], i) << 1;
1183 bits |= get_bit(scalars[num], i - 1);
1184 ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
1186 /* select the point to add or subtract */
1187 select_point(digit, 17, pre_comp[num], tmp);
1188 felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative
/* constant-time: pick Y or -Y according to the recoded sign */
1190 copy_conditional(tmp[1], tmp[3], sign);
1193 point_add(nq[0], nq[1], nq[2],
1194 nq[0], nq[1], nq[2],
1195 mixed, tmp[0], tmp[1], tmp[2]);
/* skip case: first real addition replaces the infinity in nq */
1197 memcpy(nq, tmp, 3 * sizeof(felem));
1203 felem_assign(x_out, nq[0]);
1204 felem_assign(y_out, nq[1]);
1205 felem_assign(z_out, nq[2]);
1208 /******************************************************************************/
1210 * FUNCTIONS TO MANAGE PRECOMPUTATION
/*
 * nistp224_pre_comp_new: allocate a zero-initialized precomputation
 * record with reference count 1 and a fresh lock.  Raises
 * ERR_R_MALLOC_FAILURE on either allocation failure (the cleanup and
 * return statements fall in lines not visible in this view).
 */
1213 static NISTP224_PRE_COMP *nistp224_pre_comp_new()
1215 NISTP224_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));
1218 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
/* the caller holds the initial reference */
1222 ret->references = 1;
1224 ret->lock = CRYPTO_THREAD_lock_new();
1225 if (ret->lock == NULL) {
1226 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
/*
 * EC_nistp224_pre_comp_dup: share |p| by atomically bumping its
 * reference count under p->lock (the return statement is not visible
 * here; presumably returns p).
 */
1233 NISTP224_PRE_COMP *EC_nistp224_pre_comp_dup(NISTP224_PRE_COMP *p)
1237 CRYPTO_UP_REF(&p->references, &i, p->lock);
/*
 * EC_nistp224_pre_comp_free: drop one reference to |p|; when the count
 * reaches zero the lock is freed (the final free of p itself is not
 * visible in this view -- confirm).
 * NOTE(review): REF_PRINT_COUNT is passed 'x', but no 'x' is declared in
 * the visible scope; this looks like a typo for 'p'.  It is harmless in
 * normal builds because REF_PRINT_COUNT expands to nothing unless
 * REF_PRINT is defined -- confirm against the full source.
 */
1241 void EC_nistp224_pre_comp_free(NISTP224_PRE_COMP *p)
1248 CRYPTO_DOWN_REF(&p->references, &i, p->lock);
1249 REF_PRINT_COUNT("EC_nistp224", x);
/* the reference count must never go negative */
1252 REF_ASSERT_ISNT(i < 0);
1254 CRYPTO_THREAD_lock_free(p->lock);
1258 /******************************************************************************/
1260 * OPENSSL EC_METHOD FUNCTIONS
/*
 * ec_GFp_nistp224_group_init: run the generic GFp group initializer,
 * then record that this curve has a = -3 (mod p), which enables the
 * optimized doubling formulas elsewhere in the EC code.
 */
1263 int ec_GFp_nistp224_group_init(EC_GROUP *group)
1266 ret = ec_GFp_simple_group_init(group);
1267 group->a_is_minus3 = 1;
/*
 * ec_GFp_nistp224_group_set_curve: accept only the exact NIST P-224
 * parameters.  The caller's (p, a, b) are compared against the built-in
 * nistp224_curve_params; a mismatch raises EC_R_WRONG_CURVE_PARAMETERS.
 * On success the fast NIST-specific modular reduction is installed and
 * the generic method stores the curve.
 */
1271 int ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p,
1272 const BIGNUM *a, const BIGNUM *b,
1276 BN_CTX *new_ctx = NULL;
1277 BIGNUM *curve_p, *curve_a, *curve_b;
/* allocate a scratch BN_CTX only if the caller did not supply one */
1280 if ((ctx = new_ctx = BN_CTX_new()) == NULL)
1283 curve_p = BN_CTX_get(ctx);
1284 curve_a = BN_CTX_get(ctx);
1285 curve_b = BN_CTX_get(ctx);
/* checking only the last BN_CTX_get covers all three (BN_CTX contract) */
1286 if (curve_b == NULL)
1288 BN_bin2bn(nistp224_curve_params[0], sizeof(felem_bytearray), curve_p);
1289 BN_bin2bn(nistp224_curve_params[1], sizeof(felem_bytearray), curve_a);
1290 BN_bin2bn(nistp224_curve_params[2], sizeof(felem_bytearray), curve_b);
1291 if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
1292 ECerr(EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE,
1293 EC_R_WRONG_CURVE_PARAMETERS);
1296 group->field_mod_func = BN_nist_mod_224;
1297 ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
/* frees only the ctx we created ourselves; BN_CTX_free(NULL) is a no-op */
1300 BN_CTX_free(new_ctx);
1305 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
/*
 * Convert a Jacobian point to affine coordinates: x = X/Z^2, y = Y/Z^3,
 * computed entirely in the internal felem representation and exported
 * as BIGNUMs.  Fails with EC_R_POINT_AT_INFINITY when Z = 0.
 * NOTE(review): the field inversion that sets z2 (used below as Z^-1)
 * falls in lines not visible in this view -- confirm it is felem_inv
 * against the full source.
 */
1308 int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
1309 const EC_POINT *point,
1310 BIGNUM *x, BIGNUM *y,
1313 felem z1, z2, x_in, y_in, x_out, y_out;
/* the point at infinity has no affine representation */
1316 if (EC_POINT_is_at_infinity(group, point)) {
1317 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
1318 EC_R_POINT_AT_INFINITY);
1321 if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
1322 (!BN_to_felem(z1, point->Z)))
/* z1 = z2^2 (i.e. Z^-2), then x = X * Z^-2 */
1325 felem_square(tmp, z2);
1326 felem_reduce(z1, tmp);
1327 felem_mul(tmp, x_in, z1);
1328 felem_reduce(x_in, tmp);
/* contract to the unique minimal representative before exporting */
1329 felem_contract(x_out, x_in);
1331 if (!felem_to_BN(x, x_out)) {
1332 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
/* z1 = Z^-2 * Z^-1 = Z^-3, then y = Y * Z^-3 */
1337 felem_mul(tmp, z1, z2);
1338 felem_reduce(z1, tmp);
1339 felem_mul(tmp, y_in, z1);
1340 felem_reduce(y_in, tmp);
1341 felem_contract(y_out, y_in);
1343 if (!felem_to_BN(y, y_out)) {
1344 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
/*
 * make_points_affine: batch-convert |num| Jacobian points to affine
 * form (Z = 1) by delegating to the shared nistp helper with this
 * curve's felem callbacks (several callback arguments fall in lines not
 * visible in this view).  tmp_felems must provide num+1 scratch felems.
 */
1352 static void make_points_affine(size_t num, felem points[ /* num */ ][3],
1353 felem tmp_felems[ /* num+1 */ ])
1356 * Runs in constant time, unless an input is the point at infinity (which
1357 * normally shouldn't happen).
1359 ec_GFp_nistp_points_make_affine_internal(num,
1363 (void (*)(void *))felem_one,
1365 (void (*)(void *, const void *))
1367 (void (*)(void *, const void *))
1368 felem_square_reduce, (void (*)
1375 (void (*)(void *, const void *))
1377 (void (*)(void *, const void *))
1382 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
1383 * values Result is stored in r (r can equal one of the inputs).
/*
 * ec_GFp_nistp224_points_mul: compute
 *     r = scalar*G + sum_i scalars[i]*points[i]
 * where NULL scalars/points contribute nothing.  Scalars outside
 * [0, 2^224) are first reduced mod the group order (with a comment
 * below noting constant-time behavior is not guaranteed for such
 * unusual inputs).  Generator precomputation (cached on the group or
 * the built-in gmul table) is used when it matches this group's
 * generator; otherwise G is handled as an ordinary point.
 */
1385 int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r,
1386 const BIGNUM *scalar, size_t num,
1387 const EC_POINT *points[],
1388 const BIGNUM *scalars[], BN_CTX *ctx)
1394 BN_CTX *new_ctx = NULL;
1395 BIGNUM *x, *y, *z, *tmp_scalar;
1396 felem_bytearray g_secret;
1397 felem_bytearray *secrets = NULL;
1398 felem (*pre_comp)[17][3] = NULL;
1399 felem *tmp_felems = NULL;
1400 felem_bytearray tmp;
1402 int have_pre_comp = 0;
1403 size_t num_points = num;
1404 felem x_in, y_in, z_in, x_out, y_out, z_out;
1405 NISTP224_PRE_COMP *pre = NULL;
1406 const felem(*g_pre_comp)[16][3] = NULL;
1407 EC_POINT *generator = NULL;
1408 const EC_POINT *p = NULL;
1409 const BIGNUM *p_scalar = NULL;
/* allocate a scratch BN_CTX only if the caller did not supply one */
1412 if ((ctx = new_ctx = BN_CTX_new()) == NULL)
1415 x = BN_CTX_get(ctx);
1416 y = BN_CTX_get(ctx);
1417 z = BN_CTX_get(ctx);
1418 tmp_scalar = BN_CTX_get(ctx);
/* checking the last BN_CTX_get covers all four */
1419 if (tmp_scalar == NULL)
1422 if (scalar != NULL) {
/* prefer precomputation cached on the group, if any */
1423 pre = group->pre_comp.nistp224;
1425 /* we have precomputation, try to use it */
1426 g_pre_comp = (const felem(*)[16][3])pre->g_pre_comp;
1428 /* try to use the standard precomputation */
1429 g_pre_comp = &gmul[0];
1430 generator = EC_POINT_new(group);
1431 if (generator == NULL)
1433 /* get the generator from precomputation */
1434 if (!felem_to_BN(x, g_pre_comp[0][1][0]) ||
1435 !felem_to_BN(y, g_pre_comp[0][1][1]) ||
1436 !felem_to_BN(z, g_pre_comp[0][1][2])) {
1437 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1440 if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
/* only use the table if it was built for this group's generator */
1444 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
1445 /* precomputation matches generator */
1449 * we don't have valid precomputation: treat the generator as a
/* the generator becomes an extra ordinary point in the batch */
1452 num_points = num_points + 1;
1455 if (num_points > 0) {
1456 if (num_points >= 3) {
1458 * unless we precompute multiples for just one or two points,
1459 * converting those into affine form is time well spent
/* per-point scalar bytes and 17-entry multiple tables */
1463 secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
1464 pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
1467 OPENSSL_malloc(sizeof(felem) * (num_points * 17 + 1));
1468 if ((secrets == NULL) || (pre_comp == NULL)
1469 || (mixed && (tmp_felems == NULL))) {
1470 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_MALLOC_FAILURE);
1475 * we treat NULL scalars as 0, and NULL points as points at infinity,
1476 * i.e., they contribute nothing to the linear combination
1478 for (i = 0; i < num_points; ++i) {
/* the extra slot (if any) carries the generator itself */
1482 p = EC_GROUP_get0_generator(group);
1485 /* the i^th point */
1488 p_scalar = scalars[i];
1490 if ((p_scalar != NULL) && (p != NULL)) {
1491 /* reduce scalar to 0 <= scalar < 2^224 */
1492 if ((BN_num_bits(p_scalar) > 224)
1493 || (BN_is_negative(p_scalar))) {
1495 * this is an unusual input, and we don't guarantee
1498 if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
1499 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1502 num_bytes = BN_bn2bin(tmp_scalar, tmp);
1504 num_bytes = BN_bn2bin(p_scalar, tmp);
/* convert big-endian BN_bn2bin output to the internal byte order */
1505 flip_endian(secrets[i], tmp, num_bytes);
1506 /* precompute multiples */
1507 if ((!BN_to_felem(x_out, p->X)) ||
1508 (!BN_to_felem(y_out, p->Y)) ||
1509 (!BN_to_felem(z_out, p->Z)))
/* table entry 1 is the point itself (entry 0 stays infinity) */
1511 felem_assign(pre_comp[i][1][0], x_out);
1512 felem_assign(pre_comp[i][1][1], y_out);
1513 felem_assign(pre_comp[i][1][2], z_out);
/* build 2P..16P: odd j via add of P, even j via doubling of j/2
 * (the branch condition falls in lines not visible here) */
1514 for (j = 2; j <= 16; ++j) {
1516 point_add(pre_comp[i][j][0], pre_comp[i][j][1],
1517 pre_comp[i][j][2], pre_comp[i][1][0],
1518 pre_comp[i][1][1], pre_comp[i][1][2], 0,
1519 pre_comp[i][j - 1][0],
1520 pre_comp[i][j - 1][1],
1521 pre_comp[i][j - 1][2]);
1523 point_double(pre_comp[i][j][0], pre_comp[i][j][1],
1524 pre_comp[i][j][2], pre_comp[i][j / 2][0],
1525 pre_comp[i][j / 2][1],
1526 pre_comp[i][j / 2][2]);
1532 make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
1535 /* the scalar for the generator */
1536 if ((scalar != NULL) && (have_pre_comp)) {
1537 memset(g_secret, 0, sizeof(g_secret));
1538 /* reduce scalar to 0 <= scalar < 2^224 */
1539 if ((BN_num_bits(scalar) > 224) || (BN_is_negative(scalar))) {
1541 * this is an unusual input, and we don't guarantee
1544 if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
1545 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1548 num_bytes = BN_bn2bin(tmp_scalar, tmp);
1550 num_bytes = BN_bn2bin(scalar, tmp);
1551 flip_endian(g_secret, tmp, num_bytes);
1552 /* do the multiplication with generator precomputation */
1553 batch_mul(x_out, y_out, z_out,
1554 (const felem_bytearray(*))secrets, num_points,
1556 mixed, (const felem(*)[17][3])pre_comp, g_pre_comp);
1558 /* do the multiplication without generator precomputation */
1559 batch_mul(x_out, y_out, z_out,
1560 (const felem_bytearray(*))secrets, num_points,
1561 NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
1562 /* reduce the output to its unique minimal representation */
1563 felem_contract(x_in, x_out);
1564 felem_contract(y_in, y_out);
1565 felem_contract(z_in, z_out);
1566 if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
1567 (!felem_to_BN(z, z_in))) {
1568 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1571 ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
/* common cleanup path; all of these are NULL-safe */
1575 EC_POINT_free(generator);
1576 BN_CTX_free(new_ctx);
1577 OPENSSL_free(secrets);
1578 OPENSSL_free(pre_comp);
1579 OPENSSL_free(tmp_felems);
/*
 * ec_GFp_nistp224_precompute_mult: build and attach the two 16-entry
 * comb tables for the group generator (used by batch_mul).  For the
 * standard NIST generator the built-in gmul table is copied; otherwise
 * the tables are computed here with doublings and additions and then
 * batch-converted to affine form.
 */
1583 int ec_GFp_nistp224_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
1586 NISTP224_PRE_COMP *pre = NULL;
1588 BN_CTX *new_ctx = NULL;
1590 EC_POINT *generator = NULL;
/* scratch space for make_points_affine on the 31 non-infinity entries */
1591 felem tmp_felems[32];
1593 /* throw away old precomputation */
1594 EC_pre_comp_free(group);
1596 if ((ctx = new_ctx = BN_CTX_new()) == NULL)
1599 x = BN_CTX_get(ctx);
1600 y = BN_CTX_get(ctx);
1603 /* get the generator */
1604 if (group->generator == NULL)
1606 generator = EC_POINT_new(group);
1607 if (generator == NULL)
/* nistp224_curve_params[3], [4] are the standard generator's x, y */
1609 BN_bin2bn(nistp224_curve_params[3], sizeof(felem_bytearray), x);
1610 BN_bin2bn(nistp224_curve_params[4], sizeof(felem_bytearray), y);
1611 if (!EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx))
1613 if ((pre = nistp224_pre_comp_new()) == NULL)
1616 * if the generator is the standard one, use built-in precomputation
1618 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
1619 memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
/* custom generator: seed table 0 entry 1 with G itself */
1622 if ((!BN_to_felem(pre->g_pre_comp[0][1][0], group->generator->X)) ||
1623 (!BN_to_felem(pre->g_pre_comp[0][1][1], group->generator->Y)) ||
1624 (!BN_to_felem(pre->g_pre_comp[0][1][2], group->generator->Z)))
1627 * compute 2^56*G, 2^112*G, 2^168*G for the first table, 2^28*G, 2^84*G,
1628 * 2^140*G, 2^196*G for the second one
1630 for (i = 1; i <= 8; i <<= 1) {
/* 28 doublings take table-0 entry i to the table-1 entry i */
1631 point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
1632 pre->g_pre_comp[1][i][2], pre->g_pre_comp[0][i][0],
1633 pre->g_pre_comp[0][i][1], pre->g_pre_comp[0][i][2]);
1634 for (j = 0; j < 27; ++j) {
1635 point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
1636 pre->g_pre_comp[1][i][2], pre->g_pre_comp[1][i][0],
1637 pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
/* another 28 doublings produce the next table-0 entry (2*i) */
1641 point_double(pre->g_pre_comp[0][2 * i][0],
1642 pre->g_pre_comp[0][2 * i][1],
1643 pre->g_pre_comp[0][2 * i][2], pre->g_pre_comp[1][i][0],
1644 pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
1645 for (j = 0; j < 27; ++j) {
1646 point_double(pre->g_pre_comp[0][2 * i][0],
1647 pre->g_pre_comp[0][2 * i][1],
1648 pre->g_pre_comp[0][2 * i][2],
1649 pre->g_pre_comp[0][2 * i][0],
1650 pre->g_pre_comp[0][2 * i][1],
1651 pre->g_pre_comp[0][2 * i][2]);
/* fill in the remaining bit-combinations for both comb tables */
1654 for (i = 0; i < 2; i++) {
1655 /* g_pre_comp[i][0] is the point at infinity */
1656 memset(pre->g_pre_comp[i][0], 0, sizeof(pre->g_pre_comp[i][0]));
1657 /* the remaining multiples */
1658 /* 2^56*G + 2^112*G resp. 2^84*G + 2^140*G */
1659 point_add(pre->g_pre_comp[i][6][0], pre->g_pre_comp[i][6][1],
1660 pre->g_pre_comp[i][6][2], pre->g_pre_comp[i][4][0],
1661 pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2],
1662 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1663 pre->g_pre_comp[i][2][2]);
1664 /* 2^56*G + 2^168*G resp. 2^84*G + 2^196*G */
1665 point_add(pre->g_pre_comp[i][10][0], pre->g_pre_comp[i][10][1],
1666 pre->g_pre_comp[i][10][2], pre->g_pre_comp[i][8][0],
1667 pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
1668 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1669 pre->g_pre_comp[i][2][2]);
1670 /* 2^112*G + 2^168*G resp. 2^140*G + 2^196*G */
1671 point_add(pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1],
1672 pre->g_pre_comp[i][12][2], pre->g_pre_comp[i][8][0],
1673 pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
1674 0, pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1],
1675 pre->g_pre_comp[i][4][2]);
1677 * 2^56*G + 2^112*G + 2^168*G resp. 2^84*G + 2^140*G + 2^196*G
1679 point_add(pre->g_pre_comp[i][14][0], pre->g_pre_comp[i][14][1],
1680 pre->g_pre_comp[i][14][2], pre->g_pre_comp[i][12][0],
1681 pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
1682 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1683 pre->g_pre_comp[i][2][2]);
1684 for (j = 1; j < 8; ++j) {
1685 /* odd multiples: add G resp. 2^28*G */
1686 point_add(pre->g_pre_comp[i][2 * j + 1][0],
1687 pre->g_pre_comp[i][2 * j + 1][1],
1688 pre->g_pre_comp[i][2 * j + 1][2],
1689 pre->g_pre_comp[i][2 * j][0],
1690 pre->g_pre_comp[i][2 * j][1],
1691 pre->g_pre_comp[i][2 * j][2], 0,
1692 pre->g_pre_comp[i][1][0], pre->g_pre_comp[i][1][1],
1693 pre->g_pre_comp[i][1][2]);
/* skip entry 0 (infinity): convert the other 31 entries to affine */
1696 make_points_affine(31, &(pre->g_pre_comp[0][1]), tmp_felems);
/* hand ownership of pre to the group */
1699 SETPRECOMP(group, nistp224, pre);
/* common cleanup; EC_nistp224_pre_comp_free(NULL) and friends are safe */
1704 EC_POINT_free(generator);
1705 BN_CTX_free(new_ctx);
1706 EC_nistp224_pre_comp_free(pre);
/*
 * ec_GFp_nistp224_have_precompute_mult: report whether generator
 * precomputation is currently attached to |group|.
 */
1710 int ec_GFp_nistp224_have_precompute_mult(const EC_GROUP *group)
1712 return HAVEPRECOMP(group, nistp224);