2 * Copyright 2001-2017 The OpenSSL Project Authors. All Rights Reserved.
3 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved
5 * Licensed under the OpenSSL license (the "License"). You may not use
6 * this file except in compliance with the License. You can obtain a copy
7 * in the file LICENSE in the source distribution or at
8 * https://www.openssl.org/source/license.html
12 #include <openssl/err.h>
14 #include "internal/cryptlib.h"
15 #include "internal/bn_int.h"
17 #include "internal/refcount.h"
20 * This file implements the wNAF-based interleaving multi-exponentiation method
22 * http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#multiexp
23 * You might now find it here:
24 * http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
25 * http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf
26 * For multiplication with precomputation, we use wNAF splitting, formerly at:
27 * http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#fastexp
30 /* structure for precomputed multiples of the generator */
/*
 * Cache of generator multiples for wNAF splitting (see ec_wNAF_mul /
 * ec_wNAF_precompute_mult below).  Shared via reference counting.
 * NOTE(review): this listing elides several original lines here (at least
 * the 'lock' member used by ec_pre_comp_new/EC_ec_pre_comp_free and the
 * closing brace); the struct as shown is incomplete.
 */
31 struct ec_pre_comp_st {
32 const EC_GROUP *group; /* parent EC_GROUP object */
33 size_t blocksize; /* block size for wNAF splitting */
34 size_t numblocks; /* max. number of blocks for which we have
36 size_t w; /* window size */
37 EC_POINT **points; /* array with pre-calculated multiples of
38 * generator: 'num' pointers to EC_POINT
39 * objects followed by a NULL */
40 size_t num; /* numblocks * 2^(w-1) */
41 CRYPTO_REF_COUNT references;
/*
 * Allocate and initialise an empty EC_PRE_COMP for 'group' with default
 * wNAF-splitting parameters (blocksize 8, window 4) and a fresh lock.
 * NOTE(review): interior lines are elided in this listing (allocation
 * failure handling, reference-count initialisation, the success return
 * and error cleanup) — only the visible skeleton is annotated.
 */
45 static EC_PRE_COMP *ec_pre_comp_new(const EC_GROUP *group)
47 EC_PRE_COMP *ret = NULL;
/* zalloc gives an all-zero struct, so unset fields start NULL/0 */
52 ret = OPENSSL_zalloc(sizeof(*ret));
54 ECerr(EC_F_EC_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
59 ret->blocksize = 8; /* default */
60 ret->w = 4; /* default */
/* per-object lock protecting the reference count */
63 ret->lock = CRYPTO_THREAD_lock_new();
64 if (ret->lock == NULL) {
65 ECerr(EC_F_EC_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
/*
 * "Duplicate" a precomputation table by taking an extra reference on it —
 * no deep copy is made; callers share the same underlying points.
 * NOTE(review): the return statement is elided from this listing;
 * presumably 'pre' itself is returned — confirm against the full source.
 */
72 EC_PRE_COMP *EC_ec_pre_comp_dup(EC_PRE_COMP *pre)
76 CRYPTO_UP_REF(&pre->references, &i, pre->lock);
/*
 * Drop one reference on 'pre'; when the count reaches zero, free the
 * NULL-terminated 'points' array, the lock, and (per the elided lines,
 * presumably) the struct itself.
 * NOTE(review): the NULL check on 'pre', the early return while references
 * remain, the per-point free call inside the loop, and the final
 * OPENSSL_free(pre) are elided from this listing.
 */
80 void EC_ec_pre_comp_free(EC_PRE_COMP *pre)
87 CRYPTO_DOWN_REF(&pre->references, &i, pre->lock);
88 REF_PRINT_COUNT("EC_ec", pre);
/* a negative count would mean a double free / refcount bug */
91 REF_ASSERT_ISNT(i < 0);
93 if (pre->points != NULL) {
/* 'points' is NULL-terminated (see struct comment above) */
96 for (pts = pre->points; *pts != NULL; pts++)
98 OPENSSL_free(pre->points);
100 CRYPTO_THREAD_lock_free(pre->lock);
/*
 * Apply BN flags (here always BN_FLG_CONSTTIME) to all three Jacobian
 * coordinates of an EC_POINT at once.  Scoped to this file only; #undef'd
 * after ec_mul_consttime.
 * NOTE(review): the closing "} while (0)" line of this macro is elided
 * from this listing.
 */
104 #define EC_POINT_BN_set_flags(P, flags) do { \
105 BN_set_flags((P)->X, (flags)); \
106 BN_set_flags((P)->Y, (flags)); \
107 BN_set_flags((P)->Z, (flags)); \
/*
 * NOTE(review): this listing elides many interior lines of this function
 * (variable declarations such as 'k' and 's', goto-error cleanup, the
 * success return).  Comments below annotate only the visible statements.
 */
111 * This functions computes (in constant time) a point multiplication over the
114 * At a high level, it is Montgomery ladder with conditional swaps.
116 * It performs either a fixed scalar point multiplication
117 * (scalar * generator)
118 * when point is NULL, or a generic scalar point multiplication
120 * when point is not NULL.
122 * scalar should be in the range [0,n) otherwise all constant time bets are off.
124 * NB: This says nothing about EC_POINT_add and EC_POINT_dbl,
125 * which of course are not constant time themselves.
127 * The product is stored in r.
129 * Returns 1 on success, 0 otherwise.
131 static int ec_mul_consttime(const EC_GROUP *group, EC_POINT *r,
132 const BIGNUM *scalar, const EC_POINT *point,
135 int i, order_bits, group_top, kbit, pbit, Z_is_one;
138 BIGNUM *lambda = NULL;
139 BN_CTX *new_ctx = NULL;
/* use a secure (mlock'd, zeroised) BN_CTX when the caller supplied none */
142 if (ctx == NULL && (ctx = new_ctx = BN_CTX_secure_new()) == NULL)
145 if ((group->order == NULL) || (group->field == NULL))
148 order_bits = BN_num_bits(group->order);
150 s = EC_POINT_new(group);
/* point == NULL selects fixed-base multiplication by the generator */
155 if (group->generator == NULL)
157 if (!EC_POINT_copy(s, group->generator))
160 if (!EC_POINT_copy(s, point))
164 EC_POINT_BN_set_flags(s, BN_FLG_CONSTTIME);
167 lambda = BN_CTX_get(ctx);
173 * Group orders are often on a word boundary.
174 * So when we pad the scalar, some timing diff might
175 * pop if it needs to be expanded due to carries.
176 * So expand ahead of time.
178 group_top = bn_get_top(group->order);
179 if ((bn_wexpand(k, group_top + 1) == NULL)
180 || (bn_wexpand(lambda, group_top + 1) == NULL))
183 if (!BN_copy(k, scalar))
186 BN_set_flags(k, BN_FLG_CONSTTIME);
188 if ((BN_num_bits(k) > order_bits) || (BN_is_negative(k))) {
190 * this is an unusual input, and we don't guarantee
193 if (!BN_nnmod(k, k, group->order, ctx))
/*
 * Pad the scalar so its bit length is fixed: select (in constant time,
 * via the swap below) whichever of scalar+order / scalar+2*order has the
 * top bit set at position 'order_bits'.
 */
197 if (!BN_add(lambda, k, group->order))
199 BN_set_flags(lambda, BN_FLG_CONSTTIME);
200 if (!BN_add(k, lambda, group->order))
203 * lambda := scalar + order
204 * k := scalar + 2*order
206 kbit = BN_is_bit_set(lambda, order_bits);
207 BN_consttime_swap(kbit, k, lambda, group_top + 1);
/* pre-expand all coordinate BIGNUMs so no reallocation happens mid-ladder */
209 group_top = bn_get_top(group->field);
210 if ((bn_wexpand(s->X, group_top) == NULL)
211 || (bn_wexpand(s->Y, group_top) == NULL)
212 || (bn_wexpand(s->Z, group_top) == NULL)
213 || (bn_wexpand(r->X, group_top) == NULL)
214 || (bn_wexpand(r->Y, group_top) == NULL)
215 || (bn_wexpand(r->Z, group_top) == NULL))
218 /* top bit is a 1, in a fixed pos */
219 if (!EC_POINT_copy(r, s))
222 EC_POINT_BN_set_flags(r, BN_FLG_CONSTTIME);
224 if (!EC_POINT_dbl(group, s, s, ctx))
/* constant-time swap of two points, including the Z_is_one flag */
229 #define EC_POINT_CSWAP(c, a, b, w, t) do { \
230 BN_consttime_swap(c, (a)->X, (b)->X, w); \
231 BN_consttime_swap(c, (a)->Y, (b)->Y, w); \
232 BN_consttime_swap(c, (a)->Z, (b)->Z, w); \
233 t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
234 (a)->Z_is_one ^= (t); \
235 (b)->Z_is_one ^= (t); \
239 * The ladder step, with branches, is
241 * k[i] == 0: S = add(R, S), R = dbl(R)
242 * k[i] == 1: R = add(S, R), S = dbl(S)
244 * Swapping R, S conditionally on k[i] leaves you with state
246 * k[i] == 0: T, U = R, S
247 * k[i] == 1: T, U = S, R
249 * Then perform the ECC ops.
254 * Which leaves you with state
256 * k[i] == 0: U = add(R, S), T = dbl(R)
257 * k[i] == 1: U = add(S, R), T = dbl(S)
259 * Swapping T, U conditionally on k[i] leaves you with state
261 * k[i] == 0: R, S = T, U
262 * k[i] == 1: R, S = U, T
264 * Which leaves you with state
266 * k[i] == 0: S = add(R, S), R = dbl(R)
267 * k[i] == 1: R = add(S, R), S = dbl(S)
269 * So we get the same logic, but instead of a branch it's a
270 * conditional swap, followed by ECC ops, then another conditional swap.
272 * Optimization: The end of iteration i and start of i-1 looks like
279 * CSWAP(k[i-1], R, S)
281 * CSWAP(k[i-1], R, S)
284 * So instead of two contiguous swaps, you can merge the condition
285 * bits and do a single swap.
287 * k[i] k[i-1] Outcome
293 * This is XOR. pbit tracks the previous bit of k.
/* main ladder: one cswap + add + dbl per scalar bit, top bit downwards */
296 for (i = order_bits - 1; i >= 0; i--) {
297 kbit = BN_is_bit_set(k, i) ^ pbit;
298 EC_POINT_CSWAP(kbit, r, s, group_top, Z_is_one);
299 if (!EC_POINT_add(group, s, r, s, ctx))
301 if (!EC_POINT_dbl(group, r, r, ctx))
304 * pbit logic merges this cswap with that of the
309 /* one final cswap to move the right value into r */
310 EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
311 #undef EC_POINT_CSWAP
318 BN_CTX_free(new_ctx);
323 #undef EC_POINT_BN_set_flags
326 * TODO: table should be optimised for the wNAF-based implementation,
327 * sometimes smaller windows will give better performance (thus the
328 * boundaries should be increased)
330 #define EC_window_bits_for_scalar_size(b) \
/*
 * Compute r := scalar*generator + sum(scalars[i]*points[i]) using
 * wNAF-based interleaving, dispatching secret-scalar cases to the
 * constant-time ladder above.
 * NOTE(review): this listing elides many interior lines (declarations of
 * i/j/k/v/max_len/tmp_len/pp, several closing braces, goto-err cleanup,
 * and the final return) — comments annotate only the visible statements.
 */
341 * \sum scalars[i]*points[i],
344 * in the addition if scalar != NULL
346 int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
347 size_t num, const EC_POINT *points[], const BIGNUM *scalars[],
350 BN_CTX *new_ctx = NULL;
351 const EC_POINT *generator = NULL;
352 EC_POINT *tmp = NULL;
354 size_t blocksize = 0, numblocks = 0; /* for wNAF splitting */
355 size_t pre_points_per_block = 0;
358 int r_is_inverted = 0;
359 int r_is_at_infinity = 1;
360 size_t *wsize = NULL; /* individual window sizes */
361 signed char **wNAF = NULL; /* individual wNAFs */
362 size_t *wNAF_len = NULL;
365 EC_POINT **val = NULL; /* precomputation */
367 EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' or
368 * 'pre_comp->points' */
369 const EC_PRE_COMP *pre_comp = NULL;
370 int num_scalar = 0; /* flag: will be set to 1 if 'scalar' must be
371 * treated like other scalars, i.e.
372 * precomputation is not available */
376 * Handle the common cases where the scalar is secret, enforcing a constant
377 * time scalar multiplication algorithm.
379 if ((scalar != NULL) && (num == 0)) {
381 * In this case we want to compute scalar * GeneratorPoint: this
382 * codepath is reached most prominently by (ephemeral) key generation
383 * of EC cryptosystems (i.e. ECDSA keygen and sign setup, ECDH
384 * keygen/first half), where the scalar is always secret. This is why
385 * we ignore if BN_FLG_CONSTTIME is actually set and we always call the
386 * constant time version.
388 return ec_mul_consttime(group, r, scalar, NULL, ctx);
390 if ((scalar == NULL) && (num == 1)) {
392 * In this case we want to compute scalar * GenericPoint: this codepath
393 * is reached most prominently by the second half of ECDH, where the
394 * secret scalar is multiplied by the peer's public point. To protect
395 * the secret scalar, we ignore if BN_FLG_CONSTTIME is actually set and
396 * we always call the constant time version.
398 return ec_mul_consttime(group, r, scalars[0], points[0], ctx);
/* from here on: multi-point path for public scalars */
401 if (group->meth != r->meth) {
402 ECerr(EC_F_EC_WNAF_MUL, EC_R_INCOMPATIBLE_OBJECTS);
406 if ((scalar == NULL) && (num == 0)) {
407 return EC_POINT_set_to_infinity(group, r);
410 for (i = 0; i < num; i++) {
411 if (group->meth != points[i]->meth) {
412 ECerr(EC_F_EC_WNAF_MUL, EC_R_INCOMPATIBLE_OBJECTS);
418 ctx = new_ctx = BN_CTX_new();
423 if (scalar != NULL) {
424 generator = EC_GROUP_get0_generator(group);
425 if (generator == NULL) {
426 ECerr(EC_F_EC_WNAF_MUL, EC_R_UNDEFINED_GENERATOR);
430 /* look if we can use precomputed multiples of generator */
432 pre_comp = group->pre_comp.ec;
/* the cached table is only valid if it was built for this generator */
433 if (pre_comp && pre_comp->numblocks
434 && (EC_POINT_cmp(group, generator, pre_comp->points[0], ctx) ==
436 blocksize = pre_comp->blocksize;
439 * determine maximum number of blocks that wNAF splitting may
440 * yield (NB: maximum wNAF length is bit length plus one)
442 numblocks = (BN_num_bits(scalar) / blocksize) + 1;
445 * we cannot use more blocks than we have precomputation for
447 if (numblocks > pre_comp->numblocks)
448 numblocks = pre_comp->numblocks;
450 pre_points_per_block = (size_t)1 << (pre_comp->w - 1);
452 /* check that pre_comp looks sane */
453 if (pre_comp->num != (pre_comp->numblocks * pre_points_per_block)) {
454 ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
458 /* can't use precomputation */
461 num_scalar = 1; /* treat 'scalar' like 'num'-th element of
466 totalnum = num + numblocks;
468 wsize = OPENSSL_malloc(totalnum * sizeof(wsize[0]));
469 wNAF_len = OPENSSL_malloc(totalnum * sizeof(wNAF_len[0]));
470 /* include space for pivot */
471 wNAF = OPENSSL_malloc((totalnum + 1) * sizeof(wNAF[0]));
472 val_sub = OPENSSL_malloc(totalnum * sizeof(val_sub[0]));
474 /* Ensure wNAF is initialised in case we end up going to err */
476 wNAF[0] = NULL; /* preliminary pivot */
478 if (wsize == NULL || wNAF_len == NULL || wNAF == NULL || val_sub == NULL) {
479 ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
484 * num_val will be the total number of temporarily precomputed points
/* per-scalar pass: pick window size, compute each wNAF, track max length */
488 for (i = 0; i < num + num_scalar; i++) {
491 bits = i < num ? BN_num_bits(scalars[i]) : BN_num_bits(scalar);
492 wsize[i] = EC_window_bits_for_scalar_size(bits);
493 num_val += (size_t)1 << (wsize[i] - 1);
494 wNAF[i + 1] = NULL; /* make sure we always have a pivot */
496 bn_compute_wNAF((i < num ? scalars[i] : scalar), wsize[i],
500 if (wNAF_len[i] > max_len)
501 max_len = wNAF_len[i];
505 /* we go here iff scalar != NULL */
507 if (pre_comp == NULL) {
508 if (num_scalar != 1) {
509 ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
512 /* we have already generated a wNAF for 'scalar' */
514 signed char *tmp_wNAF = NULL;
517 if (num_scalar != 0) {
518 ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
523 * use the window size for which we have precomputation
525 wsize[num] = pre_comp->w;
526 tmp_wNAF = bn_compute_wNAF(scalar, wsize[num], &tmp_len);
530 if (tmp_len <= max_len) {
532 * One of the other wNAFs is at least as long as the wNAF
533 * belonging to the generator, so wNAF splitting will not buy
538 totalnum = num + 1; /* don't use wNAF splitting */
539 wNAF[num] = tmp_wNAF;
540 wNAF[num + 1] = NULL;
541 wNAF_len[num] = tmp_len;
543 * pre_comp->points starts with the points that we need here:
545 val_sub[num] = pre_comp->points;
548 * don't include tmp_wNAF directly into wNAF array - use wNAF
549 * splitting and include the blocks
553 EC_POINT **tmp_points;
555 if (tmp_len < numblocks * blocksize) {
557 * possibly we can do with fewer blocks than estimated
559 numblocks = (tmp_len + blocksize - 1) / blocksize;
560 if (numblocks > pre_comp->numblocks) {
561 ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
562 OPENSSL_free(tmp_wNAF);
565 totalnum = num + numblocks;
568 /* split wNAF in 'numblocks' parts */
570 tmp_points = pre_comp->points;
572 for (i = num; i < totalnum; i++) {
573 if (i < totalnum - 1) {
574 wNAF_len[i] = blocksize;
575 if (tmp_len < blocksize) {
576 ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
577 OPENSSL_free(tmp_wNAF);
580 tmp_len -= blocksize;
583 * last block gets whatever is left (this could be
584 * more or less than 'blocksize'!)
586 wNAF_len[i] = tmp_len;
589 wNAF[i] = OPENSSL_malloc(wNAF_len[i]);
590 if (wNAF[i] == NULL) {
591 ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
592 OPENSSL_free(tmp_wNAF);
595 memcpy(wNAF[i], pp, wNAF_len[i]);
596 if (wNAF_len[i] > max_len)
597 max_len = wNAF_len[i];
599 if (*tmp_points == NULL) {
600 ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
601 OPENSSL_free(tmp_wNAF);
604 val_sub[i] = tmp_points;
605 tmp_points += pre_points_per_block;
608 OPENSSL_free(tmp_wNAF);
614 * All points we precompute now go into a single array 'val'.
615 * 'val_sub[i]' is a pointer to the subarray for the i-th point, or to a
616 * subarray of 'pre_comp->points' if we already have precomputation.
618 val = OPENSSL_malloc((num_val + 1) * sizeof(val[0]));
620 ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
623 val[num_val] = NULL; /* pivot element */
625 /* allocate points for precomputation */
627 for (i = 0; i < num + num_scalar; i++) {
629 for (j = 0; j < ((size_t)1 << (wsize[i] - 1)); j++) {
630 *v = EC_POINT_new(group);
/* sanity check: we must have allocated exactly num_val points */
636 if (!(v == val + num_val)) {
637 ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
641 if ((tmp = EC_POINT_new(group)) == NULL)
645 * prepare precomputed values:
646 * val_sub[i][0] := points[i]
647 * val_sub[i][1] := 3 * points[i]
648 * val_sub[i][2] := 5 * points[i]
651 for (i = 0; i < num + num_scalar; i++) {
653 if (!EC_POINT_copy(val_sub[i][0], points[i]))
656 if (!EC_POINT_copy(val_sub[i][0], generator))
/* tmp = 2*base; odd multiples are built by repeated addition of tmp */
661 if (!EC_POINT_dbl(group, tmp, val_sub[i][0], ctx))
663 for (j = 1; j < ((size_t)1 << (wsize[i] - 1)); j++) {
665 (group, val_sub[i][j], val_sub[i][j - 1], tmp, ctx))
/* batch-convert to affine so later adds use cheaper mixed addition */
671 if (!EC_POINTs_make_affine(group, num_val, val, ctx))
674 r_is_at_infinity = 1;
/* main interleaved evaluation loop: one double + table lookups per digit */
676 for (k = max_len - 1; k >= 0; k--) {
677 if (!r_is_at_infinity) {
678 if (!EC_POINT_dbl(group, r, r, ctx))
682 for (i = 0; i < totalnum; i++) {
683 if (wNAF_len[i] > (size_t)k) {
684 int digit = wNAF[i][k];
/* track sign lazily: invert r only when the digit's sign changes */
693 if (is_neg != r_is_inverted) {
694 if (!r_is_at_infinity) {
695 if (!EC_POINT_invert(group, r, ctx))
698 r_is_inverted = !r_is_inverted;
703 if (r_is_at_infinity) {
704 if (!EC_POINT_copy(r, val_sub[i][digit >> 1]))
706 r_is_at_infinity = 0;
709 (group, r, r, val_sub[i][digit >> 1], ctx))
717 if (r_is_at_infinity) {
718 if (!EC_POINT_set_to_infinity(group, r))
/* undo any pending lazy inversion before returning */
722 if (!EC_POINT_invert(group, r, ctx))
729 BN_CTX_free(new_ctx);
732 OPENSSL_free(wNAF_len);
736 for (w = wNAF; *w != NULL; w++)
742 for (v = val; *v != NULL; v++)
743 EC_POINT_clear_free(*v);
747 OPENSSL_free(val_sub);
/*
 * NOTE(review): interior lines are elided in this listing (declarations of
 * j/k/order, the assignments of blocksize/w defaults around the window
 * adjustment, storing 'w'/'num' into pre_comp, the success return, and the
 * err-path cleanup labels) — comments annotate only the visible statements.
 */
752 * ec_wNAF_precompute_mult()
753 * creates an EC_PRE_COMP object with preprecomputed multiples of the generator
754 * for use with wNAF splitting as implemented in ec_wNAF_mul().
756 * 'pre_comp->points' is an array of multiples of the generator
757 * of the following form:
758 * points[0] = generator;
759 * points[1] = 3 * generator;
761 * points[2^(w-1)-1] = (2^(w-1)-1) * generator;
762 * points[2^(w-1)] = 2^blocksize * generator;
763 * points[2^(w-1)+1] = 3 * 2^blocksize * generator;
765 * points[2^(w-1)*(numblocks-1)-1] = (2^(w-1)) * 2^(blocksize*(numblocks-2)) * generator
766 * points[2^(w-1)*(numblocks-1)] = 2^(blocksize*(numblocks-1)) * generator
768 * points[2^(w-1)*numblocks-1] = (2^(w-1)) * 2^(blocksize*(numblocks-1)) * generator
769 * points[2^(w-1)*numblocks] = NULL
771 int ec_wNAF_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
773 const EC_POINT *generator;
774 EC_POINT *tmp_point = NULL, *base = NULL, **var;
775 BN_CTX *new_ctx = NULL;
777 size_t i, bits, w, pre_points_per_block, blocksize, numblocks, num;
778 EC_POINT **points = NULL;
779 EC_PRE_COMP *pre_comp;
782 /* if there is an old EC_PRE_COMP object, throw it away */
783 EC_pre_comp_free(group);
784 if ((pre_comp = ec_pre_comp_new(group)) == NULL)
787 generator = EC_GROUP_get0_generator(group);
788 if (generator == NULL) {
789 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, EC_R_UNDEFINED_GENERATOR);
794 ctx = new_ctx = BN_CTX_new();
801 order = EC_GROUP_get0_order(group);
804 if (BN_is_zero(order)) {
805 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, EC_R_UNKNOWN_ORDER);
809 bits = BN_num_bits(order);
811 * The following parameters mean we precompute (approximately) one point
812 * per bit. TBD: The combination 8, 4 is perfect for 160 bits; for other
813 * bit lengths, other parameter combinations might provide better
818 if (EC_window_bits_for_scalar_size(bits) > w) {
819 /* let's not make the window too small ... */
820 w = EC_window_bits_for_scalar_size(bits);
823 numblocks = (bits + blocksize - 1) / blocksize; /* max. number of blocks
827 pre_points_per_block = (size_t)1 << (w - 1);
828 num = pre_points_per_block * numblocks; /* number of points to compute
/* NULL-terminated table, matching the layout documented above */
831 points = OPENSSL_malloc(sizeof(*points) * (num + 1));
832 if (points == NULL) {
833 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
838 var[num] = NULL; /* pivot */
839 for (i = 0; i < num; i++) {
840 if ((var[i] = EC_POINT_new(group)) == NULL) {
841 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
846 if ((tmp_point = EC_POINT_new(group)) == NULL
847 || (base = EC_POINT_new(group)) == NULL) {
848 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
852 if (!EC_POINT_copy(base, generator))
855 /* do the precomputation */
856 for (i = 0; i < numblocks; i++) {
/* tmp_point = 2*base, used to step between odd multiples of base */
859 if (!EC_POINT_dbl(group, tmp_point, base, ctx))
862 if (!EC_POINT_copy(*var++, base))
865 for (j = 1; j < pre_points_per_block; j++, var++) {
867 * calculate odd multiples of the current base point
869 if (!EC_POINT_add(group, *var, tmp_point, *(var - 1), ctx))
873 if (i < numblocks - 1) {
875 * get the next base (multiply current one by 2^blocksize)
879 if (blocksize <= 2) {
880 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_INTERNAL_ERROR);
/* one doubling already done into tmp_point; finish the 2^blocksize */
884 if (!EC_POINT_dbl(group, base, tmp_point, ctx))
886 for (k = 2; k < blocksize; k++) {
887 if (!EC_POINT_dbl(group, base, base, ctx))
/* affine coordinates make ec_wNAF_mul's additions cheaper */
893 if (!EC_POINTs_make_affine(group, num, points, ctx))
896 pre_comp->group = group;
897 pre_comp->blocksize = blocksize;
898 pre_comp->numblocks = numblocks;
900 pre_comp->points = points;
/* hand ownership of pre_comp to the group cache */
903 SETPRECOMP(group, ec, pre_comp);
910 BN_CTX_free(new_ctx);
911 EC_ec_pre_comp_free(pre_comp);
915 for (p = points; *p != NULL; p++)
917 OPENSSL_free(points);
919 EC_POINT_free(tmp_point);
/*
 * Return nonzero iff 'group' carries a cached EC_PRE_COMP table for the
 * wNAF method (as installed by ec_wNAF_precompute_mult via SETPRECOMP).
 * NOTE(review): the function's braces are elided from this listing.
 */
924 int ec_wNAF_have_precompute_mult(const EC_GROUP *group)
926 return HAVEPRECOMP(group, ec);