/*
 * Copyright 1995-2017 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
10 #include "internal/cryptlib.h"
11 #include "internal/constant_time_locl.h"
18 # define alloca _alloca
20 #elif defined(__GNUC__)
22 # define alloca(s) __builtin_alloca((s))
31 #if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
32 # include "sparc_arch.h"
33 extern unsigned int OPENSSL_sparcv9cap_P[];
34 # define SPARC_T4_MONT
37 /* maximum precomputation table size for *variable* sliding windows */
40 /* this one works - simple but works */
/*
 * BN_exp(): raw (non-modular) exponentiation r = a^p, computed by
 * left-to-right square-and-multiply over the bits of 'p' (BN_sqr on the
 * running power 'v', BN_mul into the accumulator 'rr' when a bit is set).
 *
 * NOTE(review): this extract elides lines of the original file (the fused
 * leading numbers jump, e.g. 41 -> 46); local declarations, error labels
 * and closing braces are not visible, so comments only describe what the
 * visible lines establish.
 */
41 int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
/* Constant-time operation is not supported here: refuse rather than leak. */
46 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) {
47 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
48 BNerr(BN_F_BN_EXP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
/* Use a scratch result when r aliases an input, so a/p stay intact. */
53 rr = ((r == a) || (r == p)) ? BN_CTX_get(ctx) : r;
55 if (rr == NULL || v == NULL)
58 if (BN_copy(v, a) == NULL)
60 bits = BN_num_bits(p);
63 if (BN_copy(rr, a) == NULL)
/* Square-and-multiply over the remaining exponent bits (bit 0 handled above). */
70 for (i = 1; i < bits; i++) {
71 if (!BN_sqr(v, v, ctx))
73 if (BN_is_bit_set(p, i)) {
74 if (!BN_mul(rr, rr, v, ctx))
/* Copy the scratch result back to the caller's r if we had to alias-protect. */
78 if (r != rr && BN_copy(r, rr) == NULL)
/*
 * BN_mod_exp(): r = a^p mod m. Dispatcher that picks the fastest
 * implementation visible below: the single-word Montgomery routine when
 * 'a' fits in one word (and constant time is not requested), the general
 * Montgomery routine for odd moduli, and reciprocal/simple fallbacks.
 *
 * NOTE(review): extract elides lines (fused numbering jumps 88 -> 98 etc.);
 * the odd/even modulus test and the #else branches are not visible here.
 */
88 int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
98 * For even modulus m = 2^k*m_odd, it might make sense to compute
99 * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
100 * exponentiation for the odd part), using appropriate exponent
101 * reductions, and combine the results using the CRT.
103 * For now, we use Montgomery only if the modulus is odd; otherwise,
104 * exponentiation using the reciprocal-based quick remaindering
107 * (Timing obtained with expspeed.c [computations a^p mod m
108 * where a, p, m are of the same length: 256, 512, 1024, 2048,
109 * 4096, 8192 bits], compared to the running time of the
110 * standard algorithm:
112 * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
113 * 55 .. 77 % [UltraSparc processor, but
114 * debug-solaris-sparcv8-gcc conf.]
116 * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
117 * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
119 * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
120 * at 2048 and more bits, but at 512 and 1024 bits, it was
121 * slower even than the standard algorithm!
123 * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
124 * should be obtained when the new Montgomery reduction code
125 * has been integrated into OpenSSL.)
129 #define MONT_EXP_WORD
/* Fast path: single-word non-negative base, constant time not required. */
134 # ifdef MONT_EXP_WORD
135 if (a->top == 1 && !a->neg
136 && (BN_get_flags(p, BN_FLG_CONSTTIME) == 0)) {
137 BN_ULONG A = a->d[0];
138 ret = BN_mod_exp_mont_word(r, A, p, m, ctx, NULL);
/* General Montgomery path (presumably guarded by an odd-m test elided here). */
141 ret = BN_mod_exp_mont(r, a, p, m, ctx, NULL);
/* Fallbacks: reciprocal-based, then the simple reference implementation. */
146 ret = BN_mod_exp_recp(r, a, p, m, ctx);
150 ret = BN_mod_exp_simple(r, a, p, m, ctx);
/*
 * BN_mod_exp_recp(): r = a^p mod m using sliding-window exponentiation
 * with reciprocal-based modular multiplication (BN_mod_mul_reciprocal /
 * BN_RECP_CTX). Precomputes odd powers val[0..2^(window-1)-1] of the
 * reduced base, then scans the exponent top-down in variable windows.
 *
 * NOTE(review): extract elides lines (numbering jumps 158 -> 161 etc.);
 * error labels, some declarations and loop closings are not visible.
 */
158 int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
159 const BIGNUM *m, BN_CTX *ctx)
161 int i, j, bits, ret = 0, wstart, wend, window, wvalue;
164 /* Table of variables obtained from 'ctx' */
165 BIGNUM *val[TABLE_SIZE];
/* Not constant-time: refuse if the caller demanded BN_FLG_CONSTTIME. */
168 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) {
169 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
170 BNerr(BN_F_BN_MOD_EXP_RECP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
174 bits = BN_num_bits(p);
176 /* x**0 mod 1 is still zero. */
187 aa = BN_CTX_get(ctx);
188 val[0] = BN_CTX_get(ctx);
192 BN_RECP_CTX_init(&recp);
194 /* ignore sign of 'm' */
/* Two BN_RECP_CTX_set calls visible: presumably |m| vs m branches — elided. */
198 if (BN_RECP_CTX_set(&recp, aa, ctx) <= 0)
201 if (BN_RECP_CTX_set(&recp, m, ctx) <= 0)
/* val[0] = a mod m (non-negative); a zero base short-circuits below. */
205 if (!BN_nnmod(val[0], a, m, ctx))
207 if (BN_is_zero(val[0])) {
213 window = BN_window_bits_for_exponent_size(bits);
/* Precompute aa = val[0]^2, then odd powers val[i] = val[i-1] * aa. */
215 if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx))
217 j = 1 << (window - 1);
218 for (i = 1; i < j; i++) {
219 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
220 !BN_mod_mul_reciprocal(val[i], val[i - 1], aa, &recp, ctx))
225 start = 1; /* This is used to avoid multiplication etc
226 * when there is only the value '1' in the
228 wvalue = 0; /* The 'value' of the window */
229 wstart = bits - 1; /* The top bit of the window */
230 wend = 0; /* The bottom bit of the window */
/* Main scan: square through zero bits, window-multiply on set bits. */
236 if (BN_is_bit_set(p, wstart) == 0) {
238 if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
246 * We now have wstart on a 'set' bit, we now need to work out how bit
247 * a window to do. To do this we need to scan forward until the last
248 * set bit before the end of the window
253 for (i = 1; i < window; i++) {
256 if (BN_is_bit_set(p, wstart - i)) {
257 wvalue <<= (i - wend);
263 /* wend is the size of the current window */
265 /* add the 'bytes above' */
267 for (i = 0; i < j; i++) {
268 if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
272 /* wvalue will be an odd number < 2^window */
273 if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx))
276 /* move the 'window' down further */
/* Cleanup: release the reciprocal context (BN_CTX cleanup elided from view). */
286 BN_RECP_CTX_free(&recp);
/*
 * BN_mod_exp_mont(): rr = a^p mod m for odd m, using sliding-window
 * exponentiation with Montgomery multiplication. Delegates to the
 * constant-time variant when BN_FLG_CONSTTIME is set on 'p'.
 *
 * NOTE(review): extract elides lines (numbering jumps 294 -> 298 etc.);
 * several declarations, the odd-m check body, and error labels are not
 * visible here.
 */
291 int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
292 const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
294 int i, j, bits, ret = 0, wstart, wend, window, wvalue;
298 /* Table of variables obtained from 'ctx' */
299 BIGNUM *val[TABLE_SIZE];
300 BN_MONT_CTX *mont = NULL;
/* Secret exponents take the fixed-window constant-time path instead. */
302 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) {
303 return BN_mod_exp_mont_consttime(rr, a, p, m, ctx, in_mont);
/* Montgomery arithmetic requires an odd modulus (check itself elided). */
311 BNerr(BN_F_BN_MOD_EXP_MONT, BN_R_CALLED_WITH_EVEN_MODULUS);
314 bits = BN_num_bits(p);
316 /* x**0 mod 1 is still zero. */
329 val[0] = BN_CTX_get(ctx);
/* Build a Montgomery context locally when the caller did not supply one. */
334 * If this is not done, things will break in the montgomery part
340 if ((mont = BN_MONT_CTX_new()) == NULL)
342 if (!BN_MONT_CTX_set(mont, m, ctx))
/* Reduce the base into [0, m) only when needed (negative or >= m). */
346 if (a->neg || BN_ucmp(a, m) >= 0) {
347 if (!BN_nnmod(val[0], a, m, ctx))
352 if (BN_is_zero(aa)) {
357 if (!BN_to_montgomery(val[0], aa, mont, ctx))
360 window = BN_window_bits_for_exponent_size(bits);
/* Precompute d = val[0]^2 and the odd powers val[1..2^(window-1)-1]. */
362 if (!BN_mod_mul_montgomery(d, val[0], val[0], mont, ctx))
364 j = 1 << (window - 1);
365 for (i = 1; i < j; i++) {
366 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
367 !BN_mod_mul_montgomery(val[i], val[i - 1], d, mont, ctx))
372 start = 1; /* This is used to avoid multiplication etc
373 * when there is only the value '1' in the
375 wvalue = 0; /* The 'value' of the window */
376 wstart = bits - 1; /* The top bit of the window */
377 wend = 0; /* The bottom bit of the window */
/* Cheap computation of 1 (mod m) in Montgomery form when the top bit of
 * m is set: R mod m == R - m == 2^(top*BN_BITS2) - m. */
379 #if 1 /* by Shay Gueron's suggestion */
380 j = m->top; /* borrow j */
381 if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
382 if (bn_wexpand(r, j) == NULL)
384 /* 2^(top*BN_BITS2) - m */
385 r->d[0] = (0 - m->d[0]) & BN_MASK2;
386 for (i = 1; i < j; i++)
387 r->d[i] = (~m->d[i]) & BN_MASK2;
390 * Upper words will be zero if the corresponding words of 'm' were
391 * 0xfff[...], so decrement r->top accordingly.
/* Fallback for the general case: convert 1 via BN_to_montgomery. */
396 if (!BN_to_montgomery(r, BN_value_one(), mont, ctx))
/* Main sliding-window scan (same shape as BN_mod_exp_recp above). */
399 if (BN_is_bit_set(p, wstart) == 0) {
401 if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
410 * We now have wstart on a 'set' bit, we now need to work out how bit
411 * a window to do. To do this we need to scan forward until the last
412 * set bit before the end of the window
417 for (i = 1; i < window; i++) {
420 if (BN_is_bit_set(p, wstart - i)) {
421 wvalue <<= (i - wend);
427 /* wend is the size of the current window */
429 /* add the 'bytes above' */
431 for (i = 0; i < j; i++) {
432 if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
436 /* wvalue will be an odd number < 2^window */
437 if (!BN_mod_mul_montgomery(r, r, val[wvalue >> 1], mont, ctx))
440 /* move the 'window' down further */
/* SPARC T4: convert out of Montgomery form by multiplying with a bare 1
 * (borrowing val[0]) instead of calling BN_from_montgomery. */
447 #if defined(SPARC_T4_MONT)
448 if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
449 j = mont->N.top; /* borrow j */
450 val[0]->d[0] = 1; /* borrow val[0] */
451 for (i = 1; i < j; i++)
454 if (!BN_mod_mul_montgomery(rr, r, val[0], mont, ctx))
458 if (!BN_from_montgomery(rr, r, mont, ctx))
/* Free the Montgomery context only if we allocated it (in_mont == NULL). */
463 BN_MONT_CTX_free(mont);
469 #if defined(SPARC_T4_MONT)
/*
 * bn_get_bits(): return the word-sized slice of 'a' starting at bit
 * 'bitpos' (low word OR'ed with spill-over from the next word), masked
 * to BN_MASK2. Out-of-range word positions contribute zero.
 *
 * NOTE(review): extract elides lines (469 -> 470 -> 475); the local
 * declarations and the bitpos %= BN_BITS2 adjustment are not visible.
 */
470 static BN_ULONG bn_get_bits(const BIGNUM *a, int bitpos)
475 wordpos = bitpos / BN_BITS2;
477 if (wordpos >= 0 && wordpos < a->top) {
478 ret = a->d[wordpos] & BN_MASK2;
/* Pull in the high part of the slice from the following word, if any. */
481 if (++wordpos < a->top)
482 ret |= a->d[wordpos] << (BN_BITS2 - bitpos);
486 return ret & BN_MASK2;
491 * BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
492 * layout so that accessing any of these table values shows the same access
493 * pattern as far as cache lines are concerned. The following functions are
494 * used to transfer a BIGNUM from/to that table.
/*
 * MOD_EXP_CTIME_COPY_TO_PREBUF(): scatter BIGNUM 'b' into column 'idx'
 * of the interleaved power table 'buf' (stride = 2^window words), so all
 * table entries share the same cache-line access pattern.
 *
 * NOTE(review): extract elides lines (497 -> 498 -> 502); the 'window'
 * parameter, the loop body and the return are not visible here.
 */
497 static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top,
498 unsigned char *buf, int idx,
502 int width = 1 << window;
503 BN_ULONG *table = (BN_ULONG *)buf;
506 top = b->top; /* this works because 'buf' is explicitly
/* Write word i of 'b' at table[idx + i*width] — one word per row. */
508 for (i = 0, j = idx; i < top; i++, j += width) {
/*
 * MOD_EXP_CTIME_COPY_FROM_PREBUF(): gather entry 'idx' from the
 * interleaved power table into 'b' WITHOUT an idx-dependent memory access
 * pattern: every candidate column is read and masked with a
 * constant-time equality mask, so cache behavior does not leak idx.
 * Two visible variants: a plain full-width scan, and (for window >= 2,
 * guard elided) a 4-way split that scans xstride-sized quarters.
 *
 * NOTE(review): extract elides lines (numbering jumps 516 -> 520 etc.);
 * accumulator declarations, stores into b->d[] and the return are not
 * visible, so do not assume anything about them from this view.
 */
515 static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top,
516 unsigned char *buf, int idx,
520 int width = 1 << window;
522 * We declare table 'volatile' in order to discourage compiler
523 * from reordering loads from the table. Concern is that if
524 * reordered in specific manner loads might give away the
525 * information we are trying to conceal. Some would argue that
526 * compiler can reorder them anyway, but it can as well be
527 * argued that doing so would be violation of standard...
529 volatile BN_ULONG *table = (volatile BN_ULONG *)buf;
531 if (bn_wexpand(b, top) == NULL)
/* Variant 1: read all 'width' columns, keep only the idx match. */
535 for (i = 0; i < top; i++, table += width) {
538 for (j = 0; j < width; j++) {
540 ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
/* Variant 2: split the row into 4 xstride-wide stripes; y0..y3 select
 * the stripe containing idx, the inner mask selects the column. */
546 int xstride = 1 << (window - 2);
547 BN_ULONG y0, y1, y2, y3;
549 i = idx >> (window - 2); /* equivalent of idx / xstride */
550 idx &= xstride - 1; /* equivalent of idx % xstride */
552 y0 = (BN_ULONG)0 - (constant_time_eq_int(i,0)&1);
553 y1 = (BN_ULONG)0 - (constant_time_eq_int(i,1)&1);
554 y2 = (BN_ULONG)0 - (constant_time_eq_int(i,2)&1);
555 y3 = (BN_ULONG)0 - (constant_time_eq_int(i,3)&1);
557 for (i = 0; i < top; i++, table += width) {
560 for (j = 0; j < xstride; j++) {
561 acc |= ( (table[j + 0 * xstride] & y0) |
562 (table[j + 1 * xstride] & y1) |
563 (table[j + 2 * xstride] & y2) |
564 (table[j + 3 * xstride] & y3) )
565 & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
/* Round x_ up to the NEXT cache-line boundary (always advances, never
 * returns x_ itself even if already aligned). */
578 * Given a pointer value, compute the next address that is a cache line
581 #define MOD_EXP_CTIME_ALIGN(x_) \
582 ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
585 * This variant of BN_mod_exp_mont() uses fixed windows and the special
586 * precomputation memory layout to limit data-dependency to a minimum to
587 * protect secret exponents (cf. the hyper-threading timing attacks pointed
588 * out by Colin Percival,
589 * http://www.daemonology.net/hyperthreading-considered-harmful/)
/*
 * BN_mod_exp_mont_consttime(): rr = a^p mod m (odd m) with a fixed-window
 * Montgomery ladder and the cache-line-interleaved power table above, so
 * the memory access pattern is independent of the secret exponent bits
 * (countermeasure to the Percival hyper-threading cache-timing attack).
 * Contains dedicated fast paths: RSAZ (AVX2) for 1024/512-bit moduli,
 * SPARC T4 montmul/montsqr, and x86_64 MONT5 assembly for window == 5.
 *
 * NOTE(review): this extract elides many lines of the original (the fused
 * numbering jumps 591 -> 592 -> 595 ...); declarations (top, tmp, am,
 * powerbufLen, numPowers, t4), error labels, #else/#endif pairing and
 * most closing braces are NOT visible. Comments below annotate only the
 * visible phases; do not infer the elided control flow from them.
 */
591 int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
592 const BIGNUM *m, BN_CTX *ctx,
593 BN_MONT_CTX *in_mont)
595 int i, bits, ret = 0, window, wvalue;
597 BN_MONT_CTX *mont = NULL;
600 unsigned char *powerbufFree = NULL;
602 unsigned char *powerbuf = NULL;
604 #if defined(SPARC_T4_MONT)
/* Odd modulus required (the BN_is_odd check itself is elided from view). */
613 BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME, BN_R_CALLED_WITH_EVEN_MODULUS);
619 bits = BN_num_bits(p);
621 /* x**0 mod 1 is still zero. */
634 * Allocate a montgomery context if it was not supplied by the caller. If
635 * this is not done, things will break in the montgomery part.
640 if ((mont = BN_MONT_CTX_new()) == NULL)
642 if (!BN_MONT_CTX_set(mont, m, ctx))
/* RSAZ fast paths for exactly-1024-bit and exactly-512-bit operands. */
648 * If the size of the operands allow it, perform the optimized
649 * RSAZ exponentiation. For further information see
650 * crypto/bn/rsaz_exp.c and accompanying assembly modules.
652 if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024)
653 && rsaz_avx2_eligible()) {
654 if (NULL == bn_wexpand(rr, 16))
656 RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d,
663 } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) {
664 if (NULL == bn_wexpand(rr, 8))
666 RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d);
675 /* Get the window size to use with size of p. */
676 window = BN_window_bits_for_ctime_exponent_size(bits);
677 #if defined(SPARC_T4_MONT)
678 if (window >= 5 && (top & 15) == 0 && top <= 64 &&
679 (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) ==
680 (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0]))
684 #if defined(OPENSSL_BN_ASM_MONT5)
686 window = 5; /* ~5% improvement for RSA2048 sign, and even
688 /* reserve space for mont->N.d[] copy */
689 powerbufLen += top * sizeof(mont->N.d[0]);
/* Size and allocate the aligned power table; small buffers may come from
 * the stack via alloca, larger ones from the heap. */
695 * Allocate a buffer large enough to hold all of the pre-computed powers
696 * of am, am itself and tmp.
698 numPowers = 1 << window;
699 powerbufLen += sizeof(m->d[0]) * (top * numPowers +
701 numPowers ? (2 * top) : numPowers));
703 if (powerbufLen < 3072)
705 alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
709 OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH))
713 powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
714 memset(powerbuf, 0, powerbufLen);
/* For the alloca case the buffer must not be freed later — presumably
 * powerbufFree is cleared here (elided); confirm against full source. */
717 if (powerbufLen < 3072)
721 /* lay down tmp and am right after powers table */
722 tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers);
724 tmp.top = am.top = 0;
725 tmp.dmax = am.dmax = top;
726 tmp.neg = am.neg = 0;
727 tmp.flags = am.flags = BN_FLG_STATIC_DATA;
729 /* prepare a^0 in Montgomery domain */
730 #if 1 /* by Shay Gueron's suggestion */
/* When m's top bit is set, R mod m = R - m = 2^(top*BN_BITS2) - m. */
731 if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
732 /* 2^(top*BN_BITS2) - m */
733 tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
734 for (i = 1; i < top; i++)
735 tmp.d[i] = (~m->d[i]) & BN_MASK2;
739 if (!BN_to_montgomery(&tmp, BN_value_one(), mont, ctx))
742 /* prepare a^1 in Montgomery domain */
743 if (a->neg || BN_ucmp(a, m) >= 0) {
744 if (!BN_mod(&am, a, m, ctx))
746 if (!BN_to_montgomery(&am, &am, mont, ctx))
748 } else if (!BN_to_montgomery(&am, a, mont, ctx))
/* ---- SPARC T4 path: assembly montmul/montsqr with flip/scatter table. */
751 #if defined(SPARC_T4_MONT)
753 typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np,
754 const BN_ULONG *n0, const void *table,
755 int power, int bits);
756 int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np,
757 const BN_ULONG *n0, const void *table,
758 int power, int bits);
759 int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np,
760 const BN_ULONG *n0, const void *table,
761 int power, int bits);
762 int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np,
763 const BN_ULONG *n0, const void *table,
764 int power, int bits);
765 int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np,
766 const BN_ULONG *n0, const void *table,
767 int power, int bits);
768 static const bn_pwr5_mont_f pwr5_funcs[4] = {
769 bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16,
770 bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32
/* Worker chosen by operand size: top/16 selects the 8/16/24/32-limb kernel. */
772 bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1];
774 typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap,
775 const void *bp, const BN_ULONG *np,
777 int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp,
778 const BN_ULONG *np, const BN_ULONG *n0);
779 int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap,
780 const void *bp, const BN_ULONG *np,
782 int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap,
783 const void *bp, const BN_ULONG *np,
785 int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap,
786 const void *bp, const BN_ULONG *np,
788 static const bn_mul_mont_f mul_funcs[4] = {
789 bn_mul_mont_t4_8, bn_mul_mont_t4_16,
790 bn_mul_mont_t4_24, bn_mul_mont_t4_32
792 bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1];
794 void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap,
795 const void *bp, const BN_ULONG *np,
796 const BN_ULONG *n0, int num);
797 void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap,
798 const void *bp, const BN_ULONG *np,
799 const BN_ULONG *n0, int num);
800 void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap,
801 const void *table, const BN_ULONG *np,
802 const BN_ULONG *n0, int num, int power);
803 void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num,
804 void *table, size_t power);
805 void bn_gather5_t4(BN_ULONG *out, size_t num,
806 void *table, size_t power);
807 void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num);
809 BN_ULONG *np = mont->N.d, *n0 = mont->n0;
810 int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less
812 * BN_to_montgomery can contaminate words above .top [in
815 * BN_DEBUG[_DEBUG] build]...
/* Zero-pad am/tmp up to 'top' words before handing to assembly. */
817 for (i = am.top; i < top; i++)
819 for (i = tmp.top; i < top; i++)
/* Scatter a^0, a^1, a^2 into the table, then powers 3..31. */
822 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0);
823 bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1);
/* T4 kernels may fail transiently: try twice, then fall back to VIS3. */
824 if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) &&
825 !(*mul_worker) (tmp.d, am.d, am.d, np, n0))
826 bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top);
827 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2);
829 for (i = 3; i < 32; i++) {
830 /* Calculate a^i = a^(i-1) * a */
831 if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) &&
832 !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0))
833 bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top);
834 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i);
837 /* switch to 64-bit domain */
838 np = alloca(top * sizeof(BN_ULONG));
840 bn_flip_t4(np, mont->N.d, top);
/* Consume the top (bits % 5 + 1) bits first so the rest divides by 5. */
843 for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--)
844 wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
845 bn_gather5_t4(tmp.d, top, powerbuf, wvalue);
848 * Scan the exponent one window at a time starting from the most
855 wvalue = bn_get_bits(p, bits + 1);
857 if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
859 /* retry once and fall back */
860 if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
/* Scalar fallback: 5 Montgomery squarings + 1 gather-multiply per window. */
864 wvalue >>= stride - 5;
866 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
867 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
868 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
869 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
870 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
871 bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top,
875 bn_flip_t4(tmp.d, tmp.d, top);
877 /* back to 32-bit domain */
879 bn_correct_top(&tmp);
/* Scrub the stack copy of the modulus words. */
880 OPENSSL_cleanse(np, top * sizeof(BN_ULONG));
/* ---- x86_64 MONT5 path: window == 5 assembly with scatter/gather. */
883 #if defined(OPENSSL_BN_ASM_MONT5)
884 if (window == 5 && top > 1) {
886 * This optimization uses ideas from http://eprint.iacr.org/2011/239,
887 * specifically optimization of cache-timing attack countermeasures
888 * and pre-computation optimization.
892 * Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
893 * 512-bit RSA is hardly relevant, we omit it to spare size...
895 void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap,
896 const void *table, const BN_ULONG *np,
897 const BN_ULONG *n0, int num, int power);
898 void bn_scatter5(const BN_ULONG *inp, size_t num,
899 void *table, size_t power);
900 void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power);
901 void bn_power5(BN_ULONG *rp, const BN_ULONG *ap,
902 const void *table, const BN_ULONG *np,
903 const BN_ULONG *n0, int num, int power);
904 int bn_get_bits5(const BN_ULONG *ap, int off);
905 int bn_from_montgomery(BN_ULONG *rp, const BN_ULONG *ap,
906 const BN_ULONG *not_used, const BN_ULONG *np,
907 const BN_ULONG *n0, int num);
909 BN_ULONG *n0 = mont->n0, *np;
912 * BN_to_montgomery can contaminate words above .top [in
913 * BN_DEBUG[_DEBUG] build]...
915 for (i = am.top; i < top; i++)
917 for (i = tmp.top; i < top; i++)
/* The modulus copy lives in the reserved tail of the power buffer. */
921 * copy mont->N.d[] to improve cache locality
923 for (np = am.d + top, i = 0; i < top; i++)
924 np[i] = mont->N.d[i];
926 bn_scatter5(tmp.d, top, powerbuf, 0);
927 bn_scatter5(am.d, am.top, powerbuf, 1);
928 bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
929 bn_scatter5(tmp.d, top, powerbuf, 2);
/* Straightforward table fill: a^i = a^(i-1) * a for i = 3..31. */
932 for (i = 3; i < 32; i++) {
933 /* Calculate a^i = a^(i-1) * a */
934 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
935 bn_scatter5(tmp.d, top, powerbuf, i);
938 /* same as above, but uses squaring for 1/2 of operations */
939 for (i = 4; i < 32; i *= 2) {
940 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
941 bn_scatter5(tmp.d, top, powerbuf, i);
943 for (i = 3; i < 8; i += 2) {
945 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
946 bn_scatter5(tmp.d, top, powerbuf, i);
947 for (j = 2 * i; j < 32; j *= 2) {
948 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
949 bn_scatter5(tmp.d, top, powerbuf, j);
952 for (; i < 16; i += 2) {
953 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
954 bn_scatter5(tmp.d, top, powerbuf, i);
955 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
956 bn_scatter5(tmp.d, top, powerbuf, 2 * i);
958 for (; i < 32; i += 2) {
959 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
960 bn_scatter5(tmp.d, top, powerbuf, i);
/* Seed the accumulator with the top (bits % 5 + 1) exponent bits. */
964 for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--)
965 wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
966 bn_gather5(tmp.d, top, powerbuf, wvalue);
969 * Scan the exponent one window at a time starting from the most
/* Generic per-window step: 5 squarings, then a gathered multiply. */
974 for (wvalue = 0, i = 0; i < 5; i++, bits--)
975 wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
977 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
978 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
979 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
980 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
981 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
982 bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top,
/* Fused assembly step: bn_power5 does the 5 squarings + multiply at once. */
986 wvalue = bn_get_bits5(p->d, bits - 4);
988 bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue);
992 ret = bn_from_montgomery(tmp.d, tmp.d, NULL, np, n0, top);
994 bn_correct_top(&tmp);
996 if (!BN_copy(rr, &tmp))
998 goto err; /* non-zero ret means it's not error */
/* ---- Generic constant-time path using the PREBUF scatter/gather. */
1003 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window))
1005 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, window))
1009 * If the window size is greater than 1, then calculate
1010 * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even
1011 * powers could instead be computed as (a^(i/2))^2 to use the slight
1012 * performance advantage of sqr over mul).
1015 if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx))
1017 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2,
1020 for (i = 3; i < numPowers; i++) {
1021 /* Calculate a^i = a^(i-1) * a */
1022 if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx))
1024 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i,
/* First (possibly short) window, fetched constant-time from the table. */
1031 for (wvalue = 0, i = bits % window; i >= 0; i--, bits--)
1032 wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
1033 if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp, top, powerbuf, wvalue,
1038 * Scan the exponent one window at a time starting from the most
1042 wvalue = 0; /* The 'value' of the window */
1044 /* Scan the window, squaring the result as we go */
1045 for (i = 0; i < window; i++, bits--) {
1046 if (!BN_mod_mul_montgomery(&tmp, &tmp, &tmp, mont, ctx))
1048 wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
1052 * Fetch the appropriate pre-computed value from the pre-buf
1054 if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue,
1058 /* Multiply the result into the intermediate result */
1059 if (!BN_mod_mul_montgomery(&tmp, &tmp, &am, mont, ctx))
1064 /* Convert the final result from montgomery to standard format */
1065 #if defined(SPARC_T4_MONT)
1066 if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
1067 am.d[0] = 1; /* borrow am */
1068 for (i = 1; i < top; i++)
1070 if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx))
1074 if (!BN_from_montgomery(rr, &tmp, mont, ctx))
/* Cleanup: free our own mont ctx, scrub and free the power buffer. */
1078 if (in_mont == NULL)
1079 BN_MONT_CTX_free(mont);
1080 if (powerbuf != NULL) {
1081 OPENSSL_cleanse(powerbuf, powerbufLen);
1082 OPENSSL_free(powerbufFree);
/*
 * BN_mod_exp_mont_word(): rr = a^p mod m for a single-word base 'a' and
 * odd modulus m. Accumulates the result as a product r*w where 'w' is a
 * single word; r stays implicit (r_is_one) for as long as possible, and
 * w is folded into r only when the word product would overflow.
 *
 * NOTE(review): extract elides lines (numbering jumps 1089 -> 1091 etc.);
 * declarations of r, t, w, next_w, r_is_one, swap_tmp, the error labels
 * and most closing braces are not visible in this view.
 */
1088 int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
1089 const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
1091 BN_MONT_CTX *mont = NULL;
1092 int b, bits, ret = 0;
/* r := (r * w) mod m, implemented as BN_mul_word + BN_mod with a
 * pointer swap so the reduced value ends up back in 'r'. */
1097 #define BN_MOD_MUL_WORD(r, w, m) \
1098 (BN_mul_word(r, (w)) && \
1099 (/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \
1100 (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1))))
1102 * BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is
1103 * probably more overhead than always using BN_mod (which uses BN_copy if
1104 * a similar test returns true).
1107 * We can use BN_mod and do not need BN_nnmod because our accumulator is
1108 * never negative (the result of BN_mod does not depend on the sign of
/* r := w converted into the Montgomery domain. */
1111 #define BN_TO_MONTGOMERY_WORD(r, w, mont) \
1112 (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx))
1114 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) {
1115 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
1116 BNerr(BN_F_BN_MOD_EXP_MONT_WORD, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
1123 if (!BN_is_odd(m)) {
1124 BNerr(BN_F_BN_MOD_EXP_MONT_WORD, BN_R_CALLED_WITH_EVEN_MODULUS);
1128 a %= m->d[0]; /* make sure that 'a' is reduced */
1130 bits = BN_num_bits(p);
1132 /* x**0 mod 1 is still zero. */
1148 r = BN_CTX_get(ctx);
1149 t = BN_CTX_get(ctx);
1153 if (in_mont != NULL)
1156 if ((mont = BN_MONT_CTX_new()) == NULL)
1158 if (!BN_MONT_CTX_set(mont, m, ctx))
1162 r_is_one = 1; /* except for Montgomery factor */
1166 /* The result is accumulated in the product r*w. */
1167 w = a; /* bit 'bits-1' of 'p' is always set */
1168 for (b = bits - 2; b >= 0; b--) {
1169 /* First, square r*w. */
/* If w*w would overflow a word, fold w into r first (converting r into
 * the Montgomery domain on first use), then restart w at 1 (elided). */
1171 if ((next_w / w) != w) { /* overflow */
1173 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1177 if (!BN_MOD_MUL_WORD(r, w, m))
1184 if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
1188 /* Second, multiply r*w by 'a' if exponent bit is set. */
1189 if (BN_is_bit_set(p, b)) {
1191 if ((next_w / a) != w) { /* overflow */
1193 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1197 if (!BN_MOD_MUL_WORD(r, w, m))
1206 /* Finally, set r:=r*w. */
1209 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1213 if (!BN_MOD_MUL_WORD(r, w, m))
/* r never left word form: result is just w mod m (only when a == 1). */
1218 if (r_is_one) { /* can happen only if a == 1 */
1222 if (!BN_from_montgomery(rr, r, mont, ctx))
1227 if (in_mont == NULL)
1228 BN_MONT_CTX_free(mont);
1234 /* The old fallback, simple version :-) */
1235 int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
1236 const BIGNUM *m, BN_CTX *ctx)
1238 int i, j, bits, ret = 0, wstart, wend, window, wvalue;
1241 /* Table of variables obtained from 'ctx' */
1242 BIGNUM *val[TABLE_SIZE];
1244 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) {
1245 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
1246 BNerr(BN_F_BN_MOD_EXP_SIMPLE, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
1250 bits = BN_num_bits(p);
1252 /* x**0 mod 1 is still zero. */
1263 d = BN_CTX_get(ctx);
1264 val[0] = BN_CTX_get(ctx);
1268 if (!BN_nnmod(val[0], a, m, ctx))
1270 if (BN_is_zero(val[0])) {
1276 window = BN_window_bits_for_exponent_size(bits);
1278 if (!BN_mod_mul(d, val[0], val[0], m, ctx))
1280 j = 1 << (window - 1);
1281 for (i = 1; i < j; i++) {
1282 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
1283 !BN_mod_mul(val[i], val[i - 1], d, m, ctx))
1288 start = 1; /* This is used to avoid multiplication etc
1289 * when there is only the value '1' in the
1291 wvalue = 0; /* The 'value' of the window */
1292 wstart = bits - 1; /* The top bit of the window */
1293 wend = 0; /* The bottom bit of the window */
1299 if (BN_is_bit_set(p, wstart) == 0) {
1301 if (!BN_mod_mul(r, r, r, m, ctx))
1309 * We now have wstart on a 'set' bit, we now need to work out how bit
1310 * a window to do. To do this we need to scan forward until the last
1311 * set bit before the end of the window
1316 for (i = 1; i < window; i++) {
1319 if (BN_is_bit_set(p, wstart - i)) {
1320 wvalue <<= (i - wend);
1326 /* wend is the size of the current window */
1328 /* add the 'bytes above' */
1330 for (i = 0; i < j; i++) {
1331 if (!BN_mod_mul(r, r, r, m, ctx))
1335 /* wvalue will be an odd number < 2^window */
1336 if (!BN_mod_mul(r, r, val[wvalue >> 1], m, ctx))
1339 /* move the 'window' down further */