X-Git-Url: https://git.openssl.org/gitweb/?p=openssl.git;a=blobdiff_plain;f=crypto%2Fec%2Fecp_nistz256.c;h=3863a61b02027c56d67b1e49d50b6839b63f79ed;hp=f4712bda0664974249d43552f7d4eeee6492d3a8;hb=cd420b0b1f2336972e386eba1cccf23b47d99538;hpb=dccd20d1b55d15afdc80ad987ff37023d323dc42

diff --git a/crypto/ec/ecp_nistz256.c b/crypto/ec/ecp_nistz256.c
index f4712bda06..3863a61b02 100644
--- a/crypto/ec/ecp_nistz256.c
+++ b/crypto/ec/ecp_nistz256.c
@@ -1,36 +1,27 @@
-/******************************************************************************
- *                                                                            *
- * Copyright 2014 Intel Corporation                                           *
- *                                                                            *
- * Licensed under the Apache License, Version 2.0 (the "License");            *
- * you may not use this file except in compliance with the License.           *
- * You may obtain a copy of the License at                                    *
- *                                                                            *
- *    http://www.apache.org/licenses/LICENSE-2.0                              *
- *                                                                            *
- * Unless required by applicable law or agreed to in writing, software        *
- * distributed under the License is distributed on an "AS IS" BASIS,          *
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   *
- * See the License for the specific language governing permissions and        *
- * limitations under the License.                                             *
- *                                                                            *
- ******************************************************************************
- *                                                                            *
- * Developers and authors:                                                    *
- * Shay Gueron (1, 2), and Vlad Krasnov (1)                                   *
- * (1) Intel Corporation, Israel Development Center                           *
- * (2) University of Haifa                                                    *
- * Reference:                                                                 *
- * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with *
- * 256 Bit Primes"                                                            *
- *                                                                            *
- ******************************************************************************/
+/*
+ * Copyright 2014-2017 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright (c) 2014, Intel Corporation. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ *
+ * Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
+ * (1) Intel Corporation, Israel Development Center, Haifa, Israel
+ * (2) University of Haifa, Israel
+ *
+ * Reference:
+ * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
+ * 256 Bit Primes"
+ */
 
 #include <string.h>
 
 #include "internal/cryptlib.h"
 #include "internal/bn_int.h"
 #include "ec_lcl.h"
+#include "internal/refcount.h"
 
 #if BN_BITS2 != 64
 # define TOBN(hi,lo)    lo,hi
@@ -75,24 +66,41 @@ struct nistz256_pre_comp_st {
      */
     PRECOMP256_ROW *precomp;
     void *precomp_storage;
-    int references;
+    CRYPTO_REF_COUNT references;
     CRYPTO_RWLOCK *lock;
 };
 
 /* Functions implemented in assembly */
+/*
+ * Most of the functions below *preserve* the property of inputs being
+ * fully reduced, i.e. being in the [0, modulus) range. Simply put, if
+ * the inputs are fully reduced, then the output is too. Note that the
+ * reverse is not true: given partially reduced inputs, the output may
+ * or may not be fully reduced. The "most" in the first sentence refers
+ * to the fact that, given the calculation flow, one can tolerate the
+ * addition (the first function below) producing a partially reduced
+ * result *if* the multiplications by 2 and 3, which customarily use
+ * addition, fully reduce it. This effectively gives two options:
+ * a) the addition produces a fully reduced result [as long as the
+ * inputs are, just like the remaining functions]; b) the addition may
+ * produce a partially reduced result, but the multiplications by 2 and
+ * 3 then perform an additional reduction step. The choice between the
+ * two can be platform-specific, but it was a) in all cases so far...
+ */
+/* Modular add: res = a+b mod P */
+void ecp_nistz256_add(BN_ULONG res[P256_LIMBS],
+                      const BN_ULONG a[P256_LIMBS],
+                      const BN_ULONG b[P256_LIMBS]);
 /* Modular mul by 2: res = 2*a mod P */
 void ecp_nistz256_mul_by_2(BN_ULONG res[P256_LIMBS],
                            const BN_ULONG a[P256_LIMBS]);
-/* Modular div by 2: res = a/2 mod P */
-void ecp_nistz256_div_by_2(BN_ULONG res[P256_LIMBS],
-                           const BN_ULONG a[P256_LIMBS]);
 /* Modular mul by 3: res = 3*a mod P */
 void ecp_nistz256_mul_by_3(BN_ULONG res[P256_LIMBS],
                            const BN_ULONG a[P256_LIMBS]);
-/* Modular add: res = a+b mod P */
-void ecp_nistz256_add(BN_ULONG res[P256_LIMBS],
-                      const BN_ULONG a[P256_LIMBS],
-                      const BN_ULONG b[P256_LIMBS]);
+
+/* Modular div by 2: res = a/2 mod P */
+void ecp_nistz256_div_by_2(BN_ULONG res[P256_LIMBS],
+                           const BN_ULONG a[P256_LIMBS]);
 /* Modular sub: res = a-b mod P */
 void ecp_nistz256_sub(BN_ULONG res[P256_LIMBS],
                       const BN_ULONG a[P256_LIMBS],
                       const BN_ULONG b[P256_LIMBS]);
@@ -203,23 +211,41 @@ static BN_ULONG is_equal(const BN_ULONG a[P256_LIMBS],
     return is_zero(res);
 }
 
-static BN_ULONG is_one(const BN_ULONG a[P256_LIMBS])
+static BN_ULONG is_one(const BIGNUM *z)
 {
-    BN_ULONG res;
-
-    res = a[0] ^ ONE[0];
-    res |= a[1] ^ ONE[1];
-    res |= a[2] ^ ONE[2];
-    res |= a[3] ^ ONE[3];
-    if (P256_LIMBS == 8) {
-        res |= a[4] ^ ONE[4];
-        res |= a[5] ^ ONE[5];
-        res |= a[6] ^ ONE[6];
+    BN_ULONG res = 0;
+    BN_ULONG *a = bn_get_words(z);
+
+    if (bn_get_top(z) == (P256_LIMBS - P256_LIMBS / 8)) {
+        res = a[0] ^ ONE[0];
+        res |= a[1] ^ ONE[1];
+        res |= a[2] ^ ONE[2];
+        res |= a[3] ^ ONE[3];
+        if (P256_LIMBS == 8) {
+            res |= a[4] ^ ONE[4];
+            res |= a[5] ^ ONE[5];
+            res |= a[6] ^ ONE[6];
+            /*
+             * no check for a[7] (being zero) on 32-bit platforms,
+             * because the value of "one" takes only 7 limbs.
+             */
+        }
+        res = is_zero(res);
     }
 
-    return is_zero(res);
+    return res;
 }
 
+/*
+ * For reference, this macro is used only while a new ecp_nistz256
+ * assembly module is being developed. For example, configure with
+ * -DECP_NISTZ256_REFERENCE_IMPLEMENTATION and implement only the
+ * functions performing the simplest arithmetic operations on 256-bit
+ * vectors. Then work on the higher-level functions performing point
+ * operations. Then remove ECP_NISTZ256_REFERENCE_IMPLEMENTATION and
+ * never define it again. (The correct macro denoting the presence of
+ * the ecp_nistz256 module is ECP_NISTZ256_ASM.)
+ */
 #ifndef ECP_NISTZ256_REFERENCE_IMPLEMENTATION
 void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a);
 void ecp_nistz256_point_add(P256_POINT *r,
@@ -301,19 +327,16 @@ static void ecp_nistz256_point_add(P256_POINT *r,
     const BN_ULONG *in2_y = b->Y;
     const BN_ULONG *in2_z = b->Z;
 
-    /* We encode infinity as (0,0), which is not on the curve,
-     * so it is OK. */
-    in1infty = (in1_x[0] | in1_x[1] | in1_x[2] | in1_x[3] |
-                in1_y[0] | in1_y[1] | in1_y[2] | in1_y[3]);
+    /*
+     * Infinity is encoded as (,,0)
+     */
+    in1infty = (in1_z[0] | in1_z[1] | in1_z[2] | in1_z[3]);
     if (P256_LIMBS == 8)
-        in1infty |= (in1_x[4] | in1_x[5] | in1_x[6] | in1_x[7] |
-                     in1_y[4] | in1_y[5] | in1_y[6] | in1_y[7]);
+        in1infty |= (in1_z[4] | in1_z[5] | in1_z[6] | in1_z[7]);
 
-    in2infty = (in2_x[0] | in2_x[1] | in2_x[2] | in2_x[3] |
-                in2_y[0] | in2_y[1] | in2_y[2] | in2_y[3]);
+    in2infty = (in2_z[0] | in2_z[1] | in2_z[2] | in2_z[3]);
     if (P256_LIMBS == 8)
-        in2infty |= (in2_x[4] | in2_x[5] | in2_x[6] | in2_x[7] |
-                     in2_y[4] | in2_y[5] | in2_y[6] | in2_y[7]);
+        in2infty |= (in2_z[4] | in2_z[5] | in2_z[6] | in2_z[7]);
 
     in1infty = is_zero(in1infty);
     in2infty = is_zero(in2infty);
@@ -402,15 +425,16 @@ static void ecp_nistz256_point_add_affine(P256_POINT *r,
     const BN_ULONG *in2_y = b->Y;
 
     /*
-     * In affine representation we encode infty as (0,0), which is not on the
-     * curve, so it is OK
+     * Infinity is encoded as (,,0)
      */
-    in1infty = (in1_x[0] | in1_x[1] | in1_x[2] | in1_x[3] |
-                in1_y[0] | in1_y[1] | in1_y[2] | in1_y[3]);
+    in1infty = (in1_z[0] | in1_z[1] | in1_z[2] | in1_z[3]);
     if (P256_LIMBS == 8)
-        in1infty |= (in1_x[4] | in1_x[5] | in1_x[6] | in1_x[7] |
-                     in1_y[4] | in1_y[5] | in1_y[6] | in1_y[7]);
+        in1infty |= (in1_z[4] | in1_z[5] | in1_z[6] | in1_z[7]);
 
+    /*
+     * In affine representation we encode infinity as (0,0), which is
+     * not on the curve, so it is OK.
+     */
     in2infty = (in2_x[0] | in2_x[1] | in2_x[2] | in2_x[3] |
                 in2_y[0] | in2_y[1] | in2_y[2] | in2_y[3]);
     if (P256_LIMBS == 8)
@@ -725,12 +749,12 @@ __owur static int ecp_nistz256_windowed_mul(const EC_GROUP *group,
 }
 
 /* Coordinates of G, for which we have precomputed tables */
-const static BN_ULONG def_xG[P256_LIMBS] = {
+static const BN_ULONG def_xG[P256_LIMBS] = {
     TOBN(0x79e730d4, 0x18a9143c), TOBN(0x75ba95fc, 0x5fedb601),
     TOBN(0x79fb732b, 0x77622510), TOBN(0x18905f76, 0xa53755c6)
 };
 
-const static BN_ULONG def_yG[P256_LIMBS] = {
+static const BN_ULONG def_yG[P256_LIMBS] = {
     TOBN(0xddf25357, 0xce95560a), TOBN(0x8b4ab8e4, 0xba19e45c),
     TOBN(0xd2e88688, 0xdd21f325), TOBN(0x8571ff18, 0x25885d85)
 };
@@ -743,10 +767,9 @@ static int ecp_nistz256_is_affine_G(const EC_POINT *generator)
 {
     return (bn_get_top(generator->X) == P256_LIMBS) &&
            (bn_get_top(generator->Y) == P256_LIMBS) &&
-           (bn_get_top(generator->Z) == (P256_LIMBS - P256_LIMBS / 8)) &&
           is_equal(bn_get_words(generator->X), def_xG) &&
           is_equal(bn_get_words(generator->Y), def_yG) &&
-           is_one(bn_get_words(generator->Z));
+           is_one(generator->Z);
 }
 
 __owur static int ecp_nistz256_mult_precompute(EC_GROUP *group, BN_CTX *ctx)
@@ -1240,6 +1263,8 @@ __owur static int ecp_nistz256_points_mul(const EC_GROUP *group,
     } else
 #endif
     {
+        BN_ULONG infty;
+
         /* First window */
         wvalue = (p_str[0] << 1) & mask;
         idx += window_size;
@@ -1252,7 +1277,30 @@ __owur static int ecp_nistz256_points_mul(const EC_GROUP *group,
         ecp_nistz256_neg(p.p.Z, p.p.Y);
         copy_conditional(p.p.Y, p.p.Z, wvalue & 1);
 
-        memcpy(p.p.Z, ONE, sizeof(ONE));
+        /*
+         * Since affine infinity is encoded as (0,0) and
+         * Jacobian is (,,0), we need to harmonize them
+         * by assigning "one" or zero to Z.
+         */
+        infty = (p.p.X[0] | p.p.X[1] | p.p.X[2] | p.p.X[3] |
+                 p.p.Y[0] | p.p.Y[1] | p.p.Y[2] | p.p.Y[3]);
+        if (P256_LIMBS == 8)
+            infty |= (p.p.X[4] | p.p.X[5] | p.p.X[6] | p.p.X[7] |
+                      p.p.Y[4] | p.p.Y[5] | p.p.Y[6] | p.p.Y[7]);
+
+        infty = 0 - is_zero(infty);
+        infty = ~infty;
+
+        p.p.Z[0] = ONE[0] & infty;
+        p.p.Z[1] = ONE[1] & infty;
+        p.p.Z[2] = ONE[2] & infty;
+        p.p.Z[3] = ONE[3] & infty;
+        if (P256_LIMBS == 8) {
+            p.p.Z[4] = ONE[4] & infty;
+            p.p.Z[5] = ONE[5] & infty;
+            p.p.Z[6] = ONE[6] & infty;
+            p.p.Z[7] = ONE[7] & infty;
+        }
 
         for (i = 1; i < 37; i++) {
             unsigned int off = (idx - 1) / 8;
@@ -1323,7 +1371,7 @@ __owur static int ecp_nistz256_points_mul(const EC_GROUP *group,
                          !bn_set_words(r->Z, p.p.Z, P256_LIMBS)) {
         goto err;
     }
-    r->Z_is_one = is_one(p.p.Z) & 1;
+    r->Z_is_one = is_one(r->Z) & 1;
 
     ret = 1;
 
@@ -1411,7 +1459,7 @@ NISTZ256_PRE_COMP *EC_nistz256_pre_comp_dup(NISTZ256_PRE_COMP *p)
 {
     int i;
     if (p != NULL)
-        CRYPTO_atomic_add(&p->references, 1, &i, p->lock);
+        CRYPTO_UP_REF(&p->references, &i, p->lock);
     return p;
 }
 
@@ -1422,7 +1470,7 @@ void EC_nistz256_pre_comp_free(NISTZ256_PRE_COMP *pre)
     if (pre == NULL)
         return;
 
-    CRYPTO_atomic_add(&pre->references, -1, &i, pre->lock);
+    CRYPTO_DOWN_REF(&pre->references, &i, pre->lock);
     REF_PRINT_COUNT("EC_nistz256", x);
     if (i > 0)
         return;
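A note on the ecp_nistz256_points_mul() hunks above: the unconditional memcpy(p.p.Z, ONE, sizeof(ONE)) is replaced by a constant-time selection, so that a point whose affine encoding is (0,0) receives Z = 0 (the Jacobian encoding of infinity) instead of Z = 1. The sketch below illustrates the same masking idea in isolation; it is not OpenSSL code, and the four-limb layout, the helper names ct_is_zero_mask() and harmonize_z(), and the `one` parameter are simplifying assumptions of mine.

/* Standalone illustration only (hypothetical helpers, four 64-bit limbs). */
#include <stdint.h>

#define LIMBS 4

/* All-ones mask when a == 0, all-zeros otherwise, with no branch on a. */
uint64_t ct_is_zero_mask(uint64_t a)
{
    uint64_t t = ~a & (a - 1);      /* MSB of t is set only when a == 0 */
    return (uint64_t)0 - (t >> 63);
}

/*
 * If (x, y) is the affine encoding of infinity (all words zero), force z
 * to zero (the Jacobian encoding of infinity); otherwise copy `one` into
 * z, where `one` is the value 1 in whatever representation the caller
 * uses (e.g. Montgomery form).
 */
void harmonize_z(uint64_t z[LIMBS], const uint64_t x[LIMBS],
                 const uint64_t y[LIMBS], const uint64_t one[LIMBS])
{
    uint64_t acc = 0, keep;
    int i;

    for (i = 0; i < LIMBS; i++)     /* acc == 0 iff x and y are all zero */
        acc |= x[i] | y[i];

    keep = ~ct_is_zero_mask(acc);   /* all-ones iff the point is finite */

    for (i = 0; i < LIMBS; i++)
        z[i] = one[i] & keep;       /* `one` for a finite point, 0 for infinity */
}

As in the patch, the decision is folded into a data-independent mask rather than a branch, the same idiom the file already applies through is_zero() and copy_conditional().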