/*
 * Copyright 2001-2021 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
/*
 * ECDSA low level APIs are deprecated for public use, but still ok for
 * internal use.
 */
15 #include "internal/deprecated.h"
18 #include <openssl/err.h>
20 #include "internal/cryptlib.h"
21 #include "crypto/bn.h"
23 #include "internal/refcount.h"
/*
 * This file implements the wNAF-based interleaving multi-exponentiation method
 * formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#multiexp
 * You might now find it here:
 *   http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
 *   http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf
 * For multiplication with precomputation, we use wNAF splitting, formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#fastexp
 */
36 /* structure for precomputed multiples of the generator */
/*
 * NOTE(review): this extract is garbled -- the original file's line numbers
 * are fused into each line, and some lines are missing (e.g. the struct's
 * closing brace and the lock member that other functions in this file access
 * as pre->lock).  Restore from pristine sources before compiling.
 */
37 struct ec_pre_comp_st {
38 const EC_GROUP *group; /* parent EC_GROUP object */
39 size_t blocksize; /* block size for wNAF splitting */
40 size_t numblocks; /* max. number of blocks for which we have
42 size_t w; /* window size */
43 EC_POINT **points; /* array with pre-calculated multiples of
44 * generator: 'num' pointers to EC_POINT
45 * objects followed by a NULL */
46 size_t num; /* numblocks * 2^(w-1) */
/* reference count, shared via EC_ec_pre_comp_dup()/EC_ec_pre_comp_free() */
47 CRYPTO_REF_COUNT references;
51 static EC_PRE_COMP *ec_pre_comp_new(const EC_GROUP *group)
/*
 * Allocate a fresh EC_PRE_COMP for 'group' with default wNAF-splitting
 * parameters (blocksize 8, window size 4) and a new lock.
 * NOTE(review): body is truncated in this extract -- the opening brace,
 * refcount initialisation, success return and error cleanup are missing.
 */
53 EC_PRE_COMP *ret = NULL;
58 ret = OPENSSL_zalloc(sizeof(*ret));
/* defaults match ossl_ec_wNAF_precompute_mult() ("8, 4 is perfect for 160 bits") */
63 ret->blocksize = 8; /* default */
64 ret->w = 4; /* default */
67 ret->lock = CRYPTO_THREAD_lock_new();
68 if (ret->lock == NULL) {
69 ERR_raise(ERR_LIB_EC, ERR_R_CRYPTO_LIB);
76 EC_PRE_COMP *EC_ec_pre_comp_dup(EC_PRE_COMP *pre)
/*
 * Share 'pre' by bumping its reference count -- no deep copy is made.
 * NOTE(review): truncated extract -- the declaration of 'i', any NULL
 * check, and the return statement are not visible here.
 */
80 CRYPTO_UP_REF(&pre->references, &i, pre->lock);
84 void EC_ec_pre_comp_free(EC_PRE_COMP *pre)
/*
 * Drop one reference to 'pre'; on the last release, free each point in the
 * NULL-terminated 'points' array, the array itself, the lock, and (in the
 * pristine source) the structure.
 * NOTE(review): truncated extract -- the early return while references
 * remain, the per-point EC_POINT_free() call in the loop body, and the
 * final free of 'pre' are not visible here.
 */
91 CRYPTO_DOWN_REF(&pre->references, &i, pre->lock);
92 REF_PRINT_COUNT("EC_ec", pre);
95 REF_ASSERT_ISNT(i < 0);
97 if (pre->points != NULL) {
/* 'points' is terminated by a NULL pivot element */
100 for (pts = pre->points; *pts != NULL; pts++)
102 OPENSSL_free(pre->points);
104 CRYPTO_THREAD_lock_free(pre->lock);
/*
 * Apply BIGNUM 'flags' to all three Jacobian coordinates of EC_POINT 'P';
 * used below to mark ladder operands BN_FLG_CONSTTIME.
 * NOTE(review): the closing '} while (0)' continuation line is missing
 * from this extract.
 */
108 #define EC_POINT_BN_set_flags(P, flags) do { \
109 BN_set_flags((P)->X, (flags)); \
110 BN_set_flags((P)->Y, (flags)); \
111 BN_set_flags((P)->Z, (flags)); \
/*-
 * This functions computes a single point multiplication over the EC group,
 * using, at a high level, a Montgomery ladder with conditional swaps, with
 * various timing attack defenses.
 *
 * It performs either a fixed point multiplication
 *          (scalar * generator)
 * when point is NULL, or a variable point multiplication
 *          (scalar * point)
 * when point is not NULL.
 *
 * `scalar` cannot be NULL and should be in the range [0,n) otherwise all
 * constant time bets are off (where n is the cardinality of the EC group).
 *
 * This function expects `group->order` and `group->cardinality` to be well
 * defined and non-zero: it fails with an error code otherwise.
 *
 * NB: This says nothing about the constant-timeness of the ladder step
 * implementation (i.e., the default implementation is based on EC_POINT_add and
 * EC_POINT_dbl, which of course are not constant time themselves) or the
 * underlying multiprecision arithmetic.
 *
 * The product is stored in `r`.
 *
 * This is an internal function: callers are in charge of ensuring that the
 * input parameters `group`, `r`, `scalar` and `ctx` are not NULL.
 *
 * Returns 1 on success, 0 otherwise.
 */
143 int ossl_ec_scalar_mul_ladder(const EC_GROUP *group, EC_POINT *r,
144 const BIGNUM *scalar, const EC_POINT *point,
/*
 * NOTE(review): this extract is garbled -- the original file's line numbers
 * are fused into every line, and many lines are missing (the BN_CTX
 * parameter, braces, 'goto err' statements, the declarations of p/s/k, the
 * BN_CTX frame handling and the cleanup tail).  The comments added below
 * document only what is visible; restore from pristine sources before use.
 */
147 int i, cardinality_bits, group_top, kbit, pbit, Z_is_one;
151 BIGNUM *lambda = NULL;
152 BIGNUM *cardinality = NULL;
155 /* early exit if the input point is the point at infinity */
156 if (point != NULL && EC_POINT_is_at_infinity(group, point))
157 return EC_POINT_set_to_infinity(group, r);
/* the ladder needs a well-defined cardinality = order * cofactor */
159 if (BN_is_zero(group->order)) {
160 ERR_raise(ERR_LIB_EC, EC_R_UNKNOWN_ORDER);
163 if (BN_is_zero(group->cofactor)) {
164 ERR_raise(ERR_LIB_EC, EC_R_UNKNOWN_COFACTOR);
/* p: the base point to multiply; s: scratch point for the ladder */
170 if (((p = EC_POINT_new(group)) == NULL)
171 || ((s = EC_POINT_new(group)) == NULL)) {
172 ERR_raise(ERR_LIB_EC, ERR_R_EC_LIB);
/* fixed-point path: multiply the group generator */
177 if (!EC_POINT_copy(p, group->generator)) {
178 ERR_raise(ERR_LIB_EC, ERR_R_EC_LIB);
/* variable-point path: multiply the caller-supplied point */
182 if (!EC_POINT_copy(p, point)) {
183 ERR_raise(ERR_LIB_EC, ERR_R_EC_LIB);
/* mark all ladder operands constant-time for the underlying BN ops */
188 EC_POINT_BN_set_flags(p, BN_FLG_CONSTTIME);
189 EC_POINT_BN_set_flags(r, BN_FLG_CONSTTIME);
190 EC_POINT_BN_set_flags(s, BN_FLG_CONSTTIME);
192 cardinality = BN_CTX_get(ctx);
193 lambda = BN_CTX_get(ctx);
196 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
200 if (!BN_mul(cardinality, group->order, group->cofactor, ctx)) {
201 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
206 * Group cardinalities are often on a word boundary.
207 * So when we pad the scalar, some timing diff might
208 * pop if it needs to be expanded due to carries.
209 * So expand ahead of time.
211 cardinality_bits = BN_num_bits(cardinality);
212 group_top = bn_get_top(cardinality);
213 if ((bn_wexpand(k, group_top + 2) == NULL)
214 || (bn_wexpand(lambda, group_top + 2) == NULL)) {
215 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
219 if (!BN_copy(k, scalar)) {
220 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
224 BN_set_flags(k, BN_FLG_CONSTTIME);
/* out-of-range scalar: reduce mod cardinality (not a constant-time path) */
226 if ((BN_num_bits(k) > cardinality_bits) || (BN_is_negative(k))) {
228 * this is an unusual input, and we don't guarantee
231 if (!BN_nnmod(k, k, cardinality, ctx)) {
232 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
/*
 * Pad the scalar to a fixed bit length so the loop count below does not
 * depend on the secret value:
 */
237 if (!BN_add(lambda, k, cardinality)) {
238 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
241 BN_set_flags(lambda, BN_FLG_CONSTTIME);
242 if (!BN_add(k, lambda, cardinality)) {
243 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
247 * lambda := scalar + cardinality
248 * k := scalar + 2*cardinality
/* select (in constant time) whichever candidate has the extra top bit set */
250 kbit = BN_is_bit_set(lambda, cardinality_bits);
251 BN_consttime_swap(kbit, k, lambda, group_top + 2);
/* pre-expand every coordinate so no BN reallocation happens inside the loop */
253 group_top = bn_get_top(group->field);
254 if ((bn_wexpand(s->X, group_top) == NULL)
255 || (bn_wexpand(s->Y, group_top) == NULL)
256 || (bn_wexpand(s->Z, group_top) == NULL)
257 || (bn_wexpand(r->X, group_top) == NULL)
258 || (bn_wexpand(r->Y, group_top) == NULL)
259 || (bn_wexpand(r->Z, group_top) == NULL)
260 || (bn_wexpand(p->X, group_top) == NULL)
261 || (bn_wexpand(p->Y, group_top) == NULL)
262 || (bn_wexpand(p->Z, group_top) == NULL)) {
263 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
267 /* ensure input point is in affine coords for ladder step efficiency */
268 if (!p->Z_is_one && (group->meth->make_affine == NULL
269 || !group->meth->make_affine(group, p, ctx))) {
270 ERR_raise(ERR_LIB_EC, ERR_R_EC_LIB);
274 /* Initialize the Montgomery ladder */
275 if (!ec_point_ladder_pre(group, r, s, p, ctx)) {
276 ERR_raise(ERR_LIB_EC, EC_R_LADDER_PRE_FAILURE);
280 /* top bit is a 1, in a fixed pos */
/*
 * Constant-time conditional swap of two EC_POINTs: swaps the three
 * coordinates plus the Z_is_one flag when condition 'c' is set.
 * NOTE(review): the closing '} while (0)' continuation line is missing
 * from this extract.
 */
283 #define EC_POINT_CSWAP(c, a, b, w, t) do { \
284 BN_consttime_swap(c, (a)->X, (b)->X, w); \
285 BN_consttime_swap(c, (a)->Y, (b)->Y, w); \
286 BN_consttime_swap(c, (a)->Z, (b)->Z, w); \
287 t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
288 (a)->Z_is_one ^= (t); \
289 (b)->Z_is_one ^= (t); \
293 * The ladder step, with branches, is
295 * k[i] == 0: S = add(R, S), R = dbl(R)
296 * k[i] == 1: R = add(S, R), S = dbl(S)
298 * Swapping R, S conditionally on k[i] leaves you with state
300 * k[i] == 0: T, U = R, S
301 * k[i] == 1: T, U = S, R
303 * Then perform the ECC ops.
308 * Which leaves you with state
310 * k[i] == 0: U = add(R, S), T = dbl(R)
311 * k[i] == 1: U = add(S, R), T = dbl(S)
313 * Swapping T, U conditionally on k[i] leaves you with state
315 * k[i] == 0: R, S = T, U
316 * k[i] == 1: R, S = U, T
318 * Which leaves you with state
320 * k[i] == 0: S = add(R, S), R = dbl(R)
321 * k[i] == 1: R = add(S, R), S = dbl(S)
323 * So we get the same logic, but instead of a branch it's a
324 * conditional swap, followed by ECC ops, then another conditional swap.
326 * Optimization: The end of iteration i and start of i-1 looks like
333 * CSWAP(k[i-1], R, S)
335 * CSWAP(k[i-1], R, S)
338 * So instead of two contiguous swaps, you can merge the condition
339 * bits and do a single swap.
341 * k[i] k[i-1] Outcome
347 * This is XOR. pbit tracks the previous bit of k.
/* main ladder loop: one merged cswap + one ladder step per scalar bit */
350 for (i = cardinality_bits - 1; i >= 0; i--) {
351 kbit = BN_is_bit_set(k, i) ^ pbit;
352 EC_POINT_CSWAP(kbit, r, s, group_top, Z_is_one);
354 /* Perform a single step of the Montgomery ladder */
355 if (!ec_point_ladder_step(group, r, s, p, ctx)) {
356 ERR_raise(ERR_LIB_EC, EC_R_LADDER_STEP_FAILURE);
360 * pbit logic merges this cswap with that of the
365 /* one final cswap to move the right value into r */
366 EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
367 #undef EC_POINT_CSWAP
369 /* Finalize ladder (and recover full point coordinates) */
370 if (!ec_point_ladder_post(group, r, s, p, ctx)) {
371 ERR_raise(ERR_LIB_EC, EC_R_LADDER_POST_FAILURE);
/*
 * NOTE(review): the rest of the cleanup (release of p, BN_CTX frame end,
 * return value handling) is missing from this extract; only the scratch
 * point 's' is visibly cleared and freed here.
 */
379 EC_POINT_clear_free(s);
385 #undef EC_POINT_BN_set_flags
388 * Table could be optimised for the wNAF-based implementation,
389 * sometimes smaller windows will give better performance (thus the
390 * boundaries should be increased)
/*
 * Map a scalar bit length 'b' to a wNAF window width.
 * NOTE(review): the macro's continuation lines (the threshold ladder that
 * actually selects the width) are missing from this extract.
 */
392 #define EC_window_bits_for_scalar_size(b) \
403 * \sum scalars[i]*points[i],
406 * in the addition if scalar != NULL
408 int ossl_ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
409 size_t num, const EC_POINT *points[],
410 const BIGNUM *scalars[], BN_CTX *ctx)
/*
 * Compute r = scalar*generator + sum_i scalars[i]*points[i] using wNAF
 * interleaving, delegating the common secret-scalar cases to the
 * constant-time Montgomery ladder.
 * NOTE(review): this extract is garbled -- original line numbers are fused
 * into every line and many lines are missing (braces, 'goto err'
 * statements, declarations of i/j/k/v/max_len/num_val/totalnum/pp/tmp_len,
 * and parts of the cleanup tail).  Comments document only what is visible.
 */
412 const EC_POINT *generator = NULL;
413 EC_POINT *tmp = NULL;
415 size_t blocksize = 0, numblocks = 0; /* for wNAF splitting */
416 size_t pre_points_per_block = 0;
419 int r_is_inverted = 0;
420 int r_is_at_infinity = 1;
421 size_t *wsize = NULL; /* individual window sizes */
422 signed char **wNAF = NULL; /* individual wNAFs */
423 size_t *wNAF_len = NULL;
426 EC_POINT **val = NULL; /* precomputation */
428 EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' or
429 * 'pre_comp->points' */
430 const EC_PRE_COMP *pre_comp = NULL;
431 int num_scalar = 0; /* flag: will be set to 1 if 'scalar' must be
432 * treated like other scalars, i.e.
433 * precomputation is not available */
436 if (!BN_is_zero(group->order) && !BN_is_zero(group->cofactor)) {
438 * Handle the common cases where the scalar is secret, enforcing a
439 * scalar multiplication implementation based on a Montgomery ladder,
440 * with various timing attack defenses.
442 if ((scalar != group->order) && (scalar != NULL) && (num == 0)) {
444 * In this case we want to compute scalar * GeneratorPoint: this
445 * codepath is reached most prominently by (ephemeral) key
446 * generation of EC cryptosystems (i.e. ECDSA keygen and sign setup,
447 * ECDH keygen/first half), where the scalar is always secret. This
448 * is why we ignore if BN_FLG_CONSTTIME is actually set and we
449 * always call the ladder version.
451 return ossl_ec_scalar_mul_ladder(group, r, scalar, NULL, ctx);
453 if ((scalar == NULL) && (num == 1) && (scalars[0] != group->order)) {
455 * In this case we want to compute scalar * VariablePoint: this
456 * codepath is reached most prominently by the second half of ECDH,
457 * where the secret scalar is multiplied by the peer's public point.
458 * To protect the secret scalar, we ignore if BN_FLG_CONSTTIME is
459 * actually set and we always call the ladder version.
461 return ossl_ec_scalar_mul_ladder(group, r, scalars[0], points[0],
466 if (scalar != NULL) {
467 generator = EC_GROUP_get0_generator(group);
468 if (generator == NULL) {
469 ERR_raise(ERR_LIB_EC, EC_R_UNDEFINED_GENERATOR);
473 /* look if we can use precomputed multiples of generator */
475 pre_comp = group->pre_comp.ec;
/* precomputation is only usable if it was made for this generator */
476 if (pre_comp && pre_comp->numblocks
477 && (EC_POINT_cmp(group, generator, pre_comp->points[0], ctx) ==
479 blocksize = pre_comp->blocksize;
482 * determine maximum number of blocks that wNAF splitting may
483 * yield (NB: maximum wNAF length is bit length plus one)
485 numblocks = (BN_num_bits(scalar) / blocksize) + 1;
488 * we cannot use more blocks than we have precomputation for
490 if (numblocks > pre_comp->numblocks)
491 numblocks = pre_comp->numblocks;
493 pre_points_per_block = (size_t)1 << (pre_comp->w - 1);
495 /* check that pre_comp looks sane */
496 if (pre_comp->num != (pre_comp->numblocks * pre_points_per_block)) {
497 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
501 /* can't use precomputation */
504 num_scalar = 1; /* treat 'scalar' like 'num'-th element of
509 totalnum = num + numblocks;
511 wsize = OPENSSL_malloc(totalnum * sizeof(wsize[0]));
512 wNAF_len = OPENSSL_malloc(totalnum * sizeof(wNAF_len[0]));
513 /* include space for pivot */
514 wNAF = OPENSSL_malloc((totalnum + 1) * sizeof(wNAF[0]));
515 val_sub = OPENSSL_malloc(totalnum * sizeof(val_sub[0]));
517 /* Ensure wNAF is initialised in case we end up going to err */
519 wNAF[0] = NULL; /* preliminary pivot */
521 if (wsize == NULL || wNAF_len == NULL || wNAF == NULL || val_sub == NULL)
525 * num_val will be the total number of temporarily precomputed points
/* per-scalar wNAF digits: one entry for each caller scalar (+ 'scalar') */
529 for (i = 0; i < num + num_scalar; i++) {
532 bits = i < num ? BN_num_bits(scalars[i]) : BN_num_bits(scalar);
533 wsize[i] = EC_window_bits_for_scalar_size(bits);
534 num_val += (size_t)1 << (wsize[i] - 1);
535 wNAF[i + 1] = NULL; /* make sure we always have a pivot */
537 bn_compute_wNAF((i < num ? scalars[i] : scalar), wsize[i],
541 if (wNAF_len[i] > max_len)
542 max_len = wNAF_len[i];
546 /* we go here iff scalar != NULL */
548 if (pre_comp == NULL) {
549 if (num_scalar != 1) {
550 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
553 /* we have already generated a wNAF for 'scalar' */
555 signed char *tmp_wNAF = NULL;
558 if (num_scalar != 0) {
559 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
564 * use the window size for which we have precomputation
566 wsize[num] = pre_comp->w;
567 tmp_wNAF = bn_compute_wNAF(scalar, wsize[num], &tmp_len);
571 if (tmp_len <= max_len) {
573 * One of the other wNAFs is at least as long as the wNAF
574 * belonging to the generator, so wNAF splitting will not buy
579 totalnum = num + 1; /* don't use wNAF splitting */
580 wNAF[num] = tmp_wNAF;
581 wNAF[num + 1] = NULL;
582 wNAF_len[num] = tmp_len;
584 * pre_comp->points starts with the points that we need here:
586 val_sub[num] = pre_comp->points;
589 * don't include tmp_wNAF directly into wNAF array - use wNAF
590 * splitting and include the blocks
594 EC_POINT **tmp_points;
596 if (tmp_len < numblocks * blocksize) {
598 * possibly we can do with fewer blocks than estimated
600 numblocks = (tmp_len + blocksize - 1) / blocksize;
601 if (numblocks > pre_comp->numblocks) {
602 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
603 OPENSSL_free(tmp_wNAF);
606 totalnum = num + numblocks;
609 /* split wNAF in 'numblocks' parts */
611 tmp_points = pre_comp->points;
613 for (i = num; i < totalnum; i++) {
614 if (i < totalnum - 1) {
615 wNAF_len[i] = blocksize;
616 if (tmp_len < blocksize) {
617 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
618 OPENSSL_free(tmp_wNAF);
621 tmp_len -= blocksize;
624 * last block gets whatever is left (this could be
625 * more or less than 'blocksize'!)
627 wNAF_len[i] = tmp_len;
630 wNAF[i] = OPENSSL_malloc(wNAF_len[i]);
631 if (wNAF[i] == NULL) {
632 OPENSSL_free(tmp_wNAF);
635 memcpy(wNAF[i], pp, wNAF_len[i]);
636 if (wNAF_len[i] > max_len)
637 max_len = wNAF_len[i];
639 if (*tmp_points == NULL) {
640 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
641 OPENSSL_free(tmp_wNAF);
644 val_sub[i] = tmp_points;
645 tmp_points += pre_points_per_block;
648 OPENSSL_free(tmp_wNAF);
654 * All points we precompute now go into a single array 'val'.
655 * 'val_sub[i]' is a pointer to the subarray for the i-th point, or to a
656 * subarray of 'pre_comp->points' if we already have precomputation.
658 val = OPENSSL_malloc((num_val + 1) * sizeof(val[0]));
661 val[num_val] = NULL; /* pivot element */
663 /* allocate points for precomputation */
665 for (i = 0; i < num + num_scalar; i++) {
667 for (j = 0; j < ((size_t)1 << (wsize[i] - 1)); j++) {
668 *v = EC_POINT_new(group);
/* sanity check: allocation loop must have consumed exactly num_val slots */
674 if (!(v == val + num_val)) {
675 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
679 if ((tmp = EC_POINT_new(group)) == NULL)
683 * prepare precomputed values:
684 * val_sub[i][0] := points[i]
685 * val_sub[i][1] := 3 * points[i]
686 * val_sub[i][2] := 5 * points[i]
689 for (i = 0; i < num + num_scalar; i++) {
691 if (!EC_POINT_copy(val_sub[i][0], points[i]))
694 if (!EC_POINT_copy(val_sub[i][0], generator))
/* tmp = 2 * base; then each further entry adds 2*base to the previous */
699 if (!EC_POINT_dbl(group, tmp, val_sub[i][0], ctx))
701 for (j = 1; j < ((size_t)1 << (wsize[i] - 1)); j++) {
703 (group, val_sub[i][j], val_sub[i][j - 1], tmp, ctx))
709 if (group->meth->points_make_affine == NULL
710 || !group->meth->points_make_affine(group, num_val, val, ctx))
/* main interleaved left-to-right wNAF evaluation loop */
713 r_is_at_infinity = 1;
715 for (k = max_len - 1; k >= 0; k--) {
716 if (!r_is_at_infinity) {
717 if (!EC_POINT_dbl(group, r, r, ctx))
721 for (i = 0; i < totalnum; i++) {
722 if (wNAF_len[i] > (size_t)k) {
723 int digit = wNAF[i][k];
/* track inversion lazily instead of negating the table entries */
732 if (is_neg != r_is_inverted) {
733 if (!r_is_at_infinity) {
734 if (!EC_POINT_invert(group, r, ctx))
737 r_is_inverted = !r_is_inverted;
742 if (r_is_at_infinity) {
743 if (!EC_POINT_copy(r, val_sub[i][digit >> 1]))
747 * Apply coordinate blinding for EC_POINT.
749 * The underlying EC_METHOD can optionally implement this function:
750 * ossl_ec_point_blind_coordinates() returns 0 in case of errors or 1 on
751 * success or if coordinate blinding is not implemented for this
754 if (!ossl_ec_point_blind_coordinates(group, r, ctx)) {
755 ERR_raise(ERR_LIB_EC, EC_R_POINT_COORDINATES_BLIND_FAILURE);
759 r_is_at_infinity = 0;
762 (group, r, r, val_sub[i][digit >> 1], ctx))
770 if (r_is_at_infinity) {
771 if (!EC_POINT_set_to_infinity(group, r))
/* undo any pending lazy inversion before returning the result */
775 if (!EC_POINT_invert(group, r, ctx))
/*
 * NOTE(review): cleanup tail is partially missing here (frees of wsize,
 * tmp and 'val' itself are not visible); only wNAF_len, the wNAF digit
 * arrays, the 'val' points and val_sub are visibly released.
 */
784 OPENSSL_free(wNAF_len);
788 for (w = wNAF; *w != NULL; w++)
794 for (v = val; *v != NULL; v++)
795 EC_POINT_clear_free(*v);
799 OPENSSL_free(val_sub);
804 * ossl_ec_wNAF_precompute_mult()
805 * creates an EC_PRE_COMP object with preprecomputed multiples of the generator
806 * for use with wNAF splitting as implemented in ossl_ec_wNAF_mul().
808 * 'pre_comp->points' is an array of multiples of the generator
809 * of the following form:
810 * points[0] = generator;
811 * points[1] = 3 * generator;
813 * points[2^(w-1)-1] = (2^(w-1)-1) * generator;
814 * points[2^(w-1)] = 2^blocksize * generator;
815 * points[2^(w-1)+1] = 3 * 2^blocksize * generator;
817 * points[2^(w-1)*(numblocks-1)-1] = (2^(w-1)) * 2^(blocksize*(numblocks-2)) * generator
818 * points[2^(w-1)*(numblocks-1)] = 2^(blocksize*(numblocks-1)) * generator
820 * points[2^(w-1)*numblocks-1] = (2^(w-1)) * 2^(blocksize*(numblocks-1)) * generator
821 * points[2^(w-1)*numblocks] = NULL
823 int ossl_ec_wNAF_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
/*
 * Build and cache an EC_PRE_COMP table of generator multiples for wNAF
 * splitting (see the layout comment above this function).
 * NOTE(review): this extract is garbled -- original line numbers are fused
 * into every line, and many lines are missing (braces, 'goto err'
 * statements, declarations of j/k/order/p/ret, the blocksize/w defaults,
 * the pre_comp->w/num assignments and the return).  Comments document only
 * what is visible.
 */
825 const EC_POINT *generator;
826 EC_POINT *tmp_point = NULL, *base = NULL, **var;
828 size_t i, bits, w, pre_points_per_block, blocksize, numblocks, num;
829 EC_POINT **points = NULL;
830 EC_PRE_COMP *pre_comp;
834 BN_CTX *new_ctx = NULL;
837 /* if there is an old EC_PRE_COMP object, throw it away */
838 EC_pre_comp_free(group);
839 if ((pre_comp = ec_pre_comp_new(group)) == NULL)
842 generator = EC_GROUP_get0_generator(group);
843 if (generator == NULL) {
844 ERR_raise(ERR_LIB_EC, EC_R_UNDEFINED_GENERATOR);
/* no caller-supplied BN_CTX: create (and later free) our own */
850 ctx = new_ctx = BN_CTX_new();
858 order = EC_GROUP_get0_order(group);
861 if (BN_is_zero(order)) {
862 ERR_raise(ERR_LIB_EC, EC_R_UNKNOWN_ORDER);
866 bits = BN_num_bits(order);
868 * The following parameters mean we precompute (approximately) one point
869 * per bit. TBD: The combination 8, 4 is perfect for 160 bits; for other
870 * bit lengths, other parameter combinations might provide better
875 if (EC_window_bits_for_scalar_size(bits) > w) {
876 /* let's not make the window too small ... */
877 w = EC_window_bits_for_scalar_size(bits);
880 numblocks = (bits + blocksize - 1) / blocksize; /* max. number of blocks
884 pre_points_per_block = (size_t)1 << (w - 1);
885 num = pre_points_per_block * numblocks; /* number of points to compute
/* allocate the NULL-terminated points array and fill it with fresh points */
888 points = OPENSSL_malloc(sizeof(*points) * (num + 1));
893 var[num] = NULL; /* pivot */
894 for (i = 0; i < num; i++) {
895 if ((var[i] = EC_POINT_new(group)) == NULL) {
896 ERR_raise(ERR_LIB_EC, ERR_R_EC_LIB);
901 if ((tmp_point = EC_POINT_new(group)) == NULL
902 || (base = EC_POINT_new(group)) == NULL) {
903 ERR_raise(ERR_LIB_EC, ERR_R_EC_LIB);
907 if (!EC_POINT_copy(base, generator))
910 /* do the precomputation */
911 for (i = 0; i < numblocks; i++) {
/* tmp_point = 2 * base; used to step between odd multiples of base */
914 if (!EC_POINT_dbl(group, tmp_point, base, ctx))
917 if (!EC_POINT_copy(*var++, base))
920 for (j = 1; j < pre_points_per_block; j++, var++) {
922 * calculate odd multiples of the current base point
924 if (!EC_POINT_add(group, *var, tmp_point, *(var - 1), ctx))
928 if (i < numblocks - 1) {
930 * get the next base (multiply current one by 2^blocksize)
934 if (blocksize <= 2) {
935 ERR_raise(ERR_LIB_EC, ERR_R_INTERNAL_ERROR);
939 if (!EC_POINT_dbl(group, base, tmp_point, ctx))
941 for (k = 2; k < blocksize; k++) {
942 if (!EC_POINT_dbl(group, base, base, ctx))
948 if (group->meth->points_make_affine == NULL
949 || !group->meth->points_make_affine(group, num, points, ctx))
/* publish the finished table on the group; pre_comp now owns 'points' */
952 pre_comp->group = group;
953 pre_comp->blocksize = blocksize;
954 pre_comp->numblocks = numblocks;
956 pre_comp->points = points;
959 SETPRECOMP(group, ec, pre_comp);
/*
 * NOTE(review): shared success/error cleanup -- on the error path
 * pre_comp and any points not handed over are released; 'base' is freed
 * on a line not visible in this extract.
 */
967 BN_CTX_free(new_ctx);
969 EC_ec_pre_comp_free(pre_comp);
973 for (p = points; *p != NULL; p++)
975 OPENSSL_free(points);
977 EC_POINT_free(tmp_point);
982 int ossl_ec_wNAF_have_precompute_mult(const EC_GROUP *group)
984 return HAVEPRECOMP(group, ec);