X-Git-Url: https://git.openssl.org/?a=blobdiff_plain;f=crypto%2Fec%2Fec_mult.c;h=651de977c9e347cc4a968efb14a9d57100ba1468;hb=e3a4f8b84cd61d6a574d19668751f1e850920807;hp=ddd3db592154f0df8b235f21e453a665ed412afe;hpb=6f8f4431705bb3f8f58f86f9b9f54daa0d42667d;p=openssl.git

diff --git a/crypto/ec/ec_mult.c b/crypto/ec/ec_mult.c
index ddd3db5921..651de977c9 100644
--- a/crypto/ec/ec_mult.c
+++ b/crypto/ec/ec_mult.c
@@ -60,16 +60,90 @@
 /* TODO: width-m NAFs */
 
-/* TODO: optional Lim-Lee precomputation for the generator */
+/* TODO: optional precomputation of multiples of the generator */
 
-/* this is just BN_window_bits_for_exponent_size from bn_lcl.h for now;
- * the table should be updated for EC */
 /* TODO */
 #define EC_window_bits_for_scalar_size(b) \
-        ((b) >  671 ? 6 : \
-         (b) >  239 ? 5 : \
-         (b) >   79 ? 4 : \
-         (b) >   23 ? 3 : 1)
+        ((b) >= 2000 ? 6 : \
+         (b) >=  800 ? 5 : \
+         (b) >=  300 ? 4 : \
+         (b) >=   70 ? 3 : \
+         (b) >=   20 ? 2 : \
+          1)
+/* For window size 'w' (w >= 2), we compute the odd multiples
+ *      1*P .. (2^w-1)*P.
+ * This accounts for 2^(w-1) point additions (neglecting constants),
+ * each of which requires 16 field multiplications (4 squarings
+ * and 12 general multiplications) in the case of curves defined
+ * over GF(p), which are the only curves we have so far.
+ *
+ * Converting these precomputed points into affine form takes
+ * three field multiplications for inverting Z and one squaring
+ * and three multiplications for adjusting X and Y, i.e.
+ * 7 multiplications in total (1 squaring and 6 general multiplications),
+ * again neglecting constants.
+ *
+ * The average number of windows for a 'b'-bit scalar is roughly
+ *      b/(w+1).
+ * Each of these windows (except possibly the first one, but
+ * we are ignoring constants anyway) requires one point addition.
+ * As the precomputed table stores points in affine form, these
+ * additions take only 11 field multiplications each (3 squarings
+ * and 8 general multiplications).
+ *
+ * So the total workload, except for constants, is
+ *
+ *      2^(w-1)*[5 squarings + 18 multiplications]
+ *      + (b/(w+1))*[3 squarings + 8 multiplications]
+ *
+ * If we assume that 10 squarings are as costly as 9 multiplications,
+ * our task is to find the 'w' that, given 'b', minimizes
+ *
+ *      2^(w-1)*(5*9 + 18*10) + (b/(w+1))*(3*9 + 8*10)
+ *      = 2^(w-1)*225 + (b/(w+1))*107.
+ *
+ * Thus optimal window sizes should be roughly as follows:
+ *
+ *      w >= 6  if  b >= 1414
+ *      w = 5   if  1413 >= b >=  505
+ *      w = 4   if   504 >= b >=  169
+ *      w = 3   if   168 >= b >=   51
+ *      w = 2   if    50 >= b >=   13
+ *      w = 1   if    12 >= b
+ *
+ * If we assume instead that squarings are exactly as costly as
+ * multiplications, we have to minimize
+ *      2^(w-1)*23 + (b/(w+1))*11.
+ *
+ * This gives us the following (nearly unchanged) table of optimal
+ * window sizes:
+ *
+ *      w >= 6  if  b >= 1406
+ *      w = 5   if  1405 >= b >=  502
+ *      w = 4   if   501 >= b >=  168
+ *      w = 3   if   167 >= b >=   51
+ *      w = 2   if    50 >= b >=   13
+ *      w = 1   if    12 >= b
+ *
+ * Note that neither table tries to take into account memory usage
+ * (allocation overhead, code locality etc.).  Actual timings with
+ * NIST curves P-192, P-224, and P-256 with scalars of 192, 224,
+ * and 256 bits, respectively, show that w = 3 (instead of 4) is
+ * preferable; timings with NIST curve P-384 and 384-bit scalars
+ * confirm that w = 4 is optimal for this case; and timings with
+ * NIST curve P-521 and 521-bit scalars show that w = 4 (instead
+ * of 5) is preferable.  So we generously round up all the
+ * boundaries and use the following table:
+ *
+ *      w >= 6  if  b >= 2000
+ *      w = 5   if  1999 >= b >=  800
+ *      w = 4   if   799 >= b >=  300
+ *      w = 3   if   299 >= b >=   70
+ *      w = 2   if    69 >= b >=   20
+ *      w = 1   if    19 >= b
+ */
+
+
 /* Compute
  * \sum scalars[i]*points[i]
@@ -77,8 +151,8 @@
  * scalar*generator
  * is included in the addition if scalar != NULL
  */
-int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, BIGNUM *scalar,
-    size_t num, EC_POINT *points[], BIGNUM *scalars[], BN_CTX *ctx)
+int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
+    size_t num, const EC_POINT *points[], const BIGNUM *scalars[], BN_CTX *ctx)
     {
     BN_CTX *new_ctx = NULL;
     EC_POINT *generator = NULL;
@@ -132,7 +206,7 @@ int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, BIGNUM *scalar,
         bits = i < num ? BN_num_bits(scalars[i]) : BN_num_bits(scalar);
         wsize[i] = EC_window_bits_for_scalar_size(bits);
-        num_val += 1 << (wsize[i] - 1);
+        num_val += 1u << (wsize[i] - 1);
         if (bits > max_bits)
             max_bits = bits;
         wbits[i] = 0;
@@ -153,7 +227,7 @@ int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, BIGNUM *scalar,
     for (i = 0; i < totalnum; i++)
         {
         val_sub[i] = v;
-        for (j = 0; j < (1 << (wsize[i] - 1)); j++)
+        for (j = 0; j < (1u << (wsize[i] - 1)); j++)
             {
             *v = EC_POINT_new(group);
             if (*v == NULL) goto err;
@@ -187,23 +261,31 @@ int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, BIGNUM *scalar,
         if (i < num)
             {
             if (!EC_POINT_copy(val_sub[i][0], points[i])) goto err;
+            if (scalars[i]->neg)
+                {
+                if (!EC_POINT_invert(group, val_sub[i][0], ctx)) goto err;
+                }
             }
         else
             {
             if (!EC_POINT_copy(val_sub[i][0], generator)) goto err;
+            if (scalar->neg)
+                {
+                if (!EC_POINT_invert(group, val_sub[i][0], ctx)) goto err;
+                }
             }
         if (wsize[i] > 1)
             {
             if (!EC_POINT_dbl(group, tmp, val_sub[i][0], ctx)) goto err;
-            for (j = 1; j < (1 << (wsize[i] - 1)); j++)
+            for (j = 1; j < (1u << (wsize[i] - 1)); j++)
                 {
                 if (!EC_POINT_add(group, val_sub[i][j], val_sub[i][j - 1], tmp, ctx)) goto err;
                 }
             }
         }
-#if 1 /* optional, maybe we should only do this if total_num > 1 */
+#if 1 /* optional; EC_window_bits_for_scalar_size assumes we do this step */
     if (!EC_POINTs_make_affine(group, num_val, val, ctx)) goto err;
 #endif
@@ -220,7 +302,7 @@ int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, BIGNUM *scalar,
         {
         if (wbits[i] == 0)
             {
-            BIGNUM *s;
+            const BIGNUM *s;
             s = i < num ? scalars[i] : scalar;
@@ -287,3 +369,59 @@ int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, BIGNUM *scalar,
         }
     return ret;
     }
+
+
+int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *point, const BIGNUM *p_scalar, BN_CTX *ctx)
+    {
+    const EC_POINT *points[1];
+    const BIGNUM *scalars[1];
+
+    points[0] = point;
+    scalars[0] = p_scalar;
+
+    return EC_POINTs_mul(group, r, g_scalar, (point != NULL && p_scalar != NULL), points, scalars, ctx);
+    }
+
+
+int EC_GROUP_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
+    {
+    const EC_POINT *generator;
+    BN_CTX *new_ctx = NULL;
+    BIGNUM *order;
+    int ret = 0;
+
+    generator = EC_GROUP_get0_generator(group);
+    if (generator == NULL)
+        {
+        ECerr(EC_F_EC_GROUP_PRECOMPUTE_MULT, EC_R_UNDEFINED_GENERATOR);
+        return 0;
+        }
+
+    if (ctx == NULL)
+        {
+        ctx = new_ctx = BN_CTX_new();
+        if (ctx == NULL)
+            return 0;
+        }
+
+    BN_CTX_start(ctx);
+    order = BN_CTX_get(ctx);
+    if (order == NULL) goto err;
+
+    if (!EC_GROUP_get_order(group, order, ctx)) goto err;
+    if (BN_is_zero(order))
+        {
+        ECerr(EC_F_EC_GROUP_PRECOMPUTE_MULT, EC_R_UNKNOWN_ORDER);
+        goto err;
+        }
+
+    /* TODO */
+
+    ret = 1;
+
+ err:
+    BN_CTX_end(ctx);
+    if (new_ctx != NULL)
+        BN_CTX_free(new_ctx);
+    return ret;
+    }
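
To make the window-size derivation in the patch's comment concrete, here is a small standalone sketch (mine, not part of the patch) that evaluates the first cost function, 2^(w-1)*225 + (b/(w+1))*107, over w = 1..6 and reports the minimizing window size for scalar sizes b near the claimed crossovers. Its output matches the first table above (w jumps from 1 to 2 between b = 12 and 13, from 4 to 5 between 504 and 505, and from 5 to 6 between 1413 and 1414); the table actually used by EC_window_bits_for_scalar_size then rounds these boundaries up based on the timings mentioned in the comment.

/* Standalone sketch (not part of the patch): evaluate the cost model
 * cost(w) = 2^(w-1)*225 + (b/(w+1))*107 from the comment above and
 * print the window size w in {1,...,6} that minimizes it. */
#include <stdio.h>

static double cost(int w, int b)
    {
    return (double)(1 << (w - 1)) * 225.0 + ((double)b / (w + 1)) * 107.0;
    }

static int best_window(int b)
    {
    int w, best = 1;

    for (w = 2; w <= 6; w++)
        if (cost(w, b) < cost(best, b))
            best = w;
    return best;
    }

int main(void)
    {
    /* scalar sizes around the claimed breakpoints, plus the NIST curve sizes */
    static const int sizes[] = { 12, 13, 50, 51, 168, 169, 192, 224, 256, 384, 504, 505, 521, 1413, 1414 };
    size_t i;

    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("b = %4d  ->  w = %d\n", sizes[i], best_window(sizes[i]));
    return 0;
    }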
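For reference, a usage sketch of the EC_POINT_mul() wrapper added by this patch. It is not part of the patch and assumes a later OpenSSL that provides named curves via EC_GROUP_new_by_curve_name() and NID_X9_62_prime256v1 (neither existed when this code was committed; at that time groups had to be built by hand from curve parameters). Passing NULL for both 'point' and 'p_scalar' exercises only the scalar*generator path of EC_POINTs_mul().

/* Usage sketch (not part of the patch): compute pub = priv*generator.
 * Assumes a later OpenSSL with named-curve constructors. */
#include <stdio.h>
#include <openssl/bn.h>
#include <openssl/ec.h>
#include <openssl/obj_mac.h>

int main(void)
    {
    int ok = 0;
    BN_CTX *ctx = BN_CTX_new();
    EC_GROUP *group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
    BIGNUM *order = BN_new();
    BIGNUM *priv = BN_new();
    EC_POINT *pub = NULL;

    if (ctx == NULL || group == NULL || order == NULL || priv == NULL)
        goto err;
    if ((pub = EC_POINT_new(group)) == NULL)
        goto err;

    if (!EC_GROUP_get_order(group, order, ctx))
        goto err;
    if (!BN_rand_range(priv, order))    /* 0 <= priv < order */
        goto err;

    /* 'point' and 'p_scalar' are NULL, so only the generator term is added */
    if (!EC_POINT_mul(group, pub, priv, NULL, NULL, ctx))
        goto err;

    printf("EC_POINT_mul succeeded\n");
    ok = 1;

 err:
    EC_POINT_free(pub);
    BN_free(priv);
    BN_free(order);
    EC_GROUP_free(group);
    BN_CTX_free(ctx);
    return ok ? 0 : 1;
    }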