2 * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
10 #include "internal/cryptlib.h"
11 #include "internal/constant_time.h"
18 # define alloca _alloca
20 #elif defined(__GNUC__)
22 # define alloca(s) __builtin_alloca((s))
31 #if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
32 # include "crypto/sparc_arch.h"
33 # define SPARC_T4_MONT
36 /* maximum precomputation table size for *variable* sliding windows */
40 * Beyond this limit the constant time code is disabled due to
41 * the possible overflow in the computation of powerbufLen in
42 * BN_mod_exp_mont_consttime.
43 * When this limit is exceeded, the computation will be done using
44 * non-constant time code, but it will take a very long time.
46 #define BN_CONSTTIME_SIZE_LIMIT (INT_MAX / BN_BYTES / 256)
48 /* this one works - simple but works */
/*
 * BN_exp(): compute r = a^p (plain exponentiation, no modulus) by
 * square-and-multiply over the bits of |p|.  Rejects operands flagged
 * BN_FLG_CONSTTIME, since without a modulus no constant-time path exists.
 * NOTE(review): this extract has gaps (the embedded original line numbers
 * are non-contiguous); comments describe only the visible code.
 */
49 int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
54 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
55 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0) {
56 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
57 ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
/* Use a scratch result if r aliases an input, so inputs are not clobbered. */
62 rr = ((r == a) || (r == p)) ? BN_CTX_get(ctx) : r;
64 if (rr == NULL || v == NULL)
67 if (BN_copy(v, a) == NULL)
69 bits = BN_num_bits(p);
72 if (BN_copy(rr, a) == NULL)
/* v holds a^(2^i); multiply it into rr wherever bit i of p is set. */
79 for (i = 1; i < bits; i++) {
80 if (!BN_sqr(v, v, ctx))
82 if (BN_is_bit_set(p, i)) {
83 if (!BN_mul(rr, rr, v, ctx))
/* Copy the scratch result back to the caller's r if one was used. */
87 if (r != rr && BN_copy(r, rr) == NULL)
/*
 * BN_mod_exp(): compute r = a^p mod m, dispatching to the most suitable
 * implementation: BN_mod_exp_mont_word() for single-word non-consttime |a|,
 * BN_mod_exp_mont() for odd |m|, otherwise (per the visible calls below)
 * BN_mod_exp_recp() or BN_mod_exp_simple().
 * NOTE(review): this extract has gaps (non-contiguous original line
 * numbers); the branch structure connecting these calls is not visible.
 */
97 int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
107 * For even modulus m = 2^k*m_odd, it might make sense to compute
108 * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
109 * exponentiation for the odd part), using appropriate exponent
110 * reductions, and combine the results using the CRT.
112 * For now, we use Montgomery only if the modulus is odd; otherwise,
113 * exponentiation using the reciprocal-based quick remaindering
116 * (Timing obtained with expspeed.c [computations a^p mod m
117 * where a, p, m are of the same length: 256, 512, 1024, 2048,
118 * 4096, 8192 bits], compared to the running time of the
119 * standard algorithm:
121 * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
122 * 55 .. 77 % [UltraSparc processor, but
123 * debug-solaris-sparcv8-gcc conf.]
125 * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
126 * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
128 * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
129 * at 2048 and more bits, but at 512 and 1024 bits, it was
130 * slower even than the standard algorithm!
132 * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
133 * should be obtained when the new Montgomery reduction code
134 * has been integrated into OpenSSL.)
138 #define MONT_EXP_WORD
/* Single-word base fast path: only when no operand demands constant time. */
143 # ifdef MONT_EXP_WORD
144 if (a->top == 1 && !a->neg
145 && (BN_get_flags(p, BN_FLG_CONSTTIME) == 0)
146 && (BN_get_flags(a, BN_FLG_CONSTTIME) == 0)
147 && (BN_get_flags(m, BN_FLG_CONSTTIME) == 0)) {
148 BN_ULONG A = a->d[0];
149 ret = BN_mod_exp_mont_word(r, A, p, m, ctx, NULL);
152 ret = BN_mod_exp_mont(r, a, p, m, ctx, NULL);
157 ret = BN_mod_exp_recp(r, a, p, m, ctx);
161 ret = BN_mod_exp_simple(r, a, p, m, ctx);
/*
 * BN_mod_exp_recp(): r = a^p mod m using sliding-window exponentiation
 * with reciprocal-based modular reduction (BN_RECP_CTX).  Not constant
 * time: operands flagged BN_FLG_CONSTTIME are rejected up front.
 * NOTE(review): this extract has gaps (non-contiguous original line
 * numbers); error labels, returns and some braces are not visible.
 */
169 int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
170 const BIGNUM *m, BN_CTX *ctx)
172 int i, j, bits, ret = 0, wstart, wend, window;
175 /* Table of variables obtained from 'ctx' */
176 BIGNUM *val[TABLE_SIZE];
179 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
180 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
181 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
182 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
183 ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
187 bits = BN_num_bits(p);
189 /* x**0 mod 1, or x**0 mod -1 is still zero. */
190 if (BN_abs_is_word(m, 1)) {
199 BN_RECP_CTX_init(&recp);
202 aa = BN_CTX_get(ctx);
203 val[0] = BN_CTX_get(ctx);
208 /* ignore sign of 'm' */
212 if (BN_RECP_CTX_set(&recp, aa, ctx) <= 0)
215 if (BN_RECP_CTX_set(&recp, m, ctx) <= 0)
/* val[0] = a mod m, non-negative; precompute odd powers below. */
219 if (!BN_nnmod(val[0], a, m, ctx))
221 if (BN_is_zero(val[0])) {
227 window = BN_window_bits_for_exponent_size(bits);
229 if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx))
231 j = 1 << (window - 1);
232 for (i = 1; i < j; i++) {
233 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
234 !BN_mod_mul_reciprocal(val[i], val[i - 1], aa, &recp, ctx))
239 start = 1; /* This is used to avoid multiplication etc
240 * when there is only the value '1' in the
242 wstart = bits - 1; /* The top bit of the window */
243 wend = 0; /* The bottom bit of the window */
/* p_dup guards against |p| aliasing |r| while the loop reads p's bits. */
246 BIGNUM *p_dup = BN_CTX_get(ctx);
248 if (p_dup == NULL || BN_copy(p_dup, p) == NULL)
257 int wvalue; /* The 'value' of the window */
259 if (BN_is_bit_set(p, wstart) == 0) {
261 if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
269 * We now have wstart on a 'set' bit, we now need to work out how big
270 * a window to do. To do this we need to scan forward until the last
271 * set bit before the end of the window
275 for (i = 1; i < window; i++) {
278 if (BN_is_bit_set(p, wstart - i)) {
279 wvalue <<= (i - wend);
285 /* wend is the size of the current window */
287 /* add the 'bytes above' */
289 for (i = 0; i < j; i++) {
290 if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
294 /* wvalue will be an odd number < 2^window */
295 if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx))
298 /* move the 'window' down further */
307 BN_RECP_CTX_free(&recp);
/*
 * BN_mod_exp_mont(): rr = a^p mod m for odd |m| using sliding-window
 * exponentiation over Montgomery multiplication.  If any operand is
 * flagged BN_FLG_CONSTTIME (and |m| is small enough for the consttime
 * table computation), delegates to BN_mod_exp_mont_consttime().
 * NOTE(review): this extract has gaps (non-contiguous original line
 * numbers); error labels, returns and some braces are not visible.
 */
312 int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
313 const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
315 int i, j, bits, ret = 0, wstart, wend, window;
319 /* Table of variables obtained from 'ctx' */
320 BIGNUM *val[TABLE_SIZE];
321 BN_MONT_CTX *mont = NULL;
328 ERR_raise(ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS);
332 if (m->top <= BN_CONSTTIME_SIZE_LIMIT
333 && (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
334 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
335 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0)) {
336 return BN_mod_exp_mont_consttime(rr, a, p, m, ctx, in_mont);
339 bits = BN_num_bits(p);
341 /* x**0 mod 1, or x**0 mod -1 is still zero. */
342 if (BN_abs_is_word(m, 1)) {
354 val[0] = BN_CTX_get(ctx);
359 * If this is not done, things will break in the montgomery part
365 if ((mont = BN_MONT_CTX_new()) == NULL)
367 if (!BN_MONT_CTX_set(mont, m, ctx))
/* Reduce |a| into [0, m) only when needed (negative or >= m). */
371 if (a->neg || BN_ucmp(a, m) >= 0) {
372 if (!BN_nnmod(val[0], a, m, ctx))
377 if (!bn_to_mont_fixed_top(val[0], aa, mont, ctx))
380 window = BN_window_bits_for_exponent_size(bits);
382 if (!bn_mul_mont_fixed_top(d, val[0], val[0], mont, ctx))
/* Precompute the odd powers a^1, a^3, ..., a^(2^window - 1). */
384 j = 1 << (window - 1);
385 for (i = 1; i < j; i++) {
386 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
387 !bn_mul_mont_fixed_top(val[i], val[i - 1], d, mont, ctx))
392 start = 1; /* This is used to avoid multiplication etc
393 * when there is only the value '1' in the
395 wstart = bits - 1; /* The top bit of the window */
396 wend = 0; /* The bottom bit of the window */
/*
 * Initialize r to 1 in Montgomery form.  When the top bit of m is set,
 * R mod m can be computed cheaply as 2^(top*BN_BITS2) - m.
 */
398 #if 1 /* by Shay Gueron's suggestion */
399 j = m->top; /* borrow j */
400 if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
401 if (bn_wexpand(r, j) == NULL)
403 /* 2^(top*BN_BITS2) - m */
404 r->d[0] = (0 - m->d[0]) & BN_MASK2;
405 for (i = 1; i < j; i++)
406 r->d[i] = (~m->d[i]) & BN_MASK2;
408 r->flags |= BN_FLG_FIXED_TOP;
411 if (!bn_to_mont_fixed_top(r, BN_value_one(), mont, ctx))
414 int wvalue; /* The 'value' of the window */
416 if (BN_is_bit_set(p, wstart) == 0) {
418 if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
427 * We now have wstart on a 'set' bit, we now need to work out how big
428 * a window to do. To do this we need to scan forward until the last
429 * set bit before the end of the window
433 for (i = 1; i < window; i++) {
436 if (BN_is_bit_set(p, wstart - i)) {
437 wvalue <<= (i - wend);
443 /* wend is the size of the current window */
445 /* add the 'bytes above' */
447 for (i = 0; i < j; i++) {
448 if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
452 /* wvalue will be an odd number < 2^window */
453 if (!bn_mul_mont_fixed_top(r, r, val[wvalue >> 1], mont, ctx))
456 /* move the 'window' down further */
463 * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
464 * removes padding [if any] and makes return value suitable for public
467 #if defined(SPARC_T4_MONT)
468 if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
469 j = mont->N.top; /* borrow j */
470 val[0]->d[0] = 1; /* borrow val[0] */
471 for (i = 1; i < j; i++)
474 if (!BN_mod_mul_montgomery(rr, r, val[0], mont, ctx))
478 if (!BN_from_montgomery(rr, r, mont, ctx))
/* Free the Montgomery context only if this function allocated it. */
483 BN_MONT_CTX_free(mont);
/*
 * bn_get_bits(): return one machine word's worth of bits of |a| starting
 * at bit position |bitpos|, spanning a word boundary when necessary.
 * Reads beyond a->top yield zero bits (out-of-range wordpos is skipped).
 * NOTE(review): this extract has gaps; the declarations of 'ret'/'wordpos'
 * and the `bitpos %= BN_BITS2` style adjustment are not visible here.
 */
489 static BN_ULONG bn_get_bits(const BIGNUM *a, int bitpos)
494 wordpos = bitpos / BN_BITS2;
496 if (wordpos >= 0 && wordpos < a->top) {
497 ret = a->d[wordpos] & BN_MASK2;
/* Pull in high bits from the next word when the read straddles words. */
500 if (++wordpos < a->top)
501 ret |= a->d[wordpos] << (BN_BITS2 - bitpos);
505 return ret & BN_MASK2;
509 * BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
510 * layout so that accessing any of these table values shows the same access
511 * pattern as far as cache lines are concerned. The following functions are
512 * used to transfer a BIGNUM from/to that table.
/*
 * MOD_EXP_CTIME_COPY_TO_PREBUF(): scatter BIGNUM |b| into column |idx| of
 * the interleaved power table in |buf| (stride = 2^window words), so that
 * later gathers touch the same cache lines regardless of the index used.
 * NOTE(review): extract has gaps; the store inside the loop and the return
 * are not visible.
 */
515 static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top,
516 unsigned char *buf, int idx,
520 int width = 1 << window;
521 BN_ULONG *table = (BN_ULONG *)buf;
524 top = b->top; /* this works because 'buf' is explicitly
526 for (i = 0, j = idx; i < top; i++, j += width) {
/*
 * MOD_EXP_CTIME_COPY_FROM_PREBUF(): gather entry |idx| out of the
 * interleaved power table into |b| using constant-time selection — every
 * table column is read and masked, so the memory access pattern does not
 * depend on the secret index.  The second (4-way) variant below unrolls
 * the scan for window sizes >= 2.
 * NOTE(review): extract has gaps; the branch between the two scan variants
 * and the final return are not visible.
 */
533 static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top,
534 unsigned char *buf, int idx,
538 int width = 1 << window;
540 * We declare table 'volatile' in order to discourage compiler
541 * from reordering loads from the table. Concern is that if
542 * reordered in specific manner loads might give away the
543 * information we are trying to conceal. Some would argue that
544 * compiler can reorder them anyway, but it can as well be
545 * argued that doing so would be violation of standard...
547 volatile BN_ULONG *table = (volatile BN_ULONG *)buf;
549 if (bn_wexpand(b, top) == NULL)
/* Linear scan: mask every column; only column 'idx' contributes. */
553 for (i = 0; i < top; i++, table += width) {
556 for (j = 0; j < width; j++) {
558 ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
/* 4-way variant: split idx into a quadrant select (y0..y3) + offset. */
564 int xstride = 1 << (window - 2);
565 BN_ULONG y0, y1, y2, y3;
567 i = idx >> (window - 2); /* equivalent of idx / xstride */
568 idx &= xstride - 1; /* equivalent of idx % xstride */
570 y0 = (BN_ULONG)0 - (constant_time_eq_int(i,0)&1);
571 y1 = (BN_ULONG)0 - (constant_time_eq_int(i,1)&1);
572 y2 = (BN_ULONG)0 - (constant_time_eq_int(i,2)&1);
573 y3 = (BN_ULONG)0 - (constant_time_eq_int(i,3)&1);
575 for (i = 0; i < top; i++, table += width) {
578 for (j = 0; j < xstride; j++) {
579 acc |= ( (table[j + 0 * xstride] & y0) |
580 (table[j + 1 * xstride] & y1) |
581 (table[j + 2 * xstride] & y2) |
582 (table[j + 3 * xstride] & y3) )
583 & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
591 b->flags |= BN_FLG_FIXED_TOP;
596 * Given a pointer value, compute the next address that is a cache line
599 #define MOD_EXP_CTIME_ALIGN(x_) \
600 ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
603 * This variant of BN_mod_exp_mont() uses fixed windows and the special
604 * precomputation memory layout to limit data-dependency to a minimum to
605 * protect secret exponents (cf. the hyper-threading timing attacks pointed
606 * out by Colin Percival,
607 * http://www.daemonology.net/hyperthreading-considered-harmful/)
/*
 * BN_mod_exp_mont_consttime(): rr = a^p mod m (odd |m|) using fixed-window
 * Montgomery exponentiation with a cache-attack-resistant power table
 * (see the Percival reference above the function in the full file).
 * Platform fast paths: RSAZ AVX2 (1024-bit), RSAZ (512-bit), SPARC T4,
 * and the MONT5 assembly gather/scatter path.
 * NOTE(review): this extract is heavily gapped (non-contiguous original
 * line numbers) — declarations, error labels, loop conditions and several
 * braces are missing; comments describe only the visible statements.
 */
609 int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
610 const BIGNUM *m, BN_CTX *ctx,
611 BN_MONT_CTX *in_mont)
613 int i, bits, ret = 0, window, wvalue, wmask, window0;
615 BN_MONT_CTX *mont = NULL;
618 unsigned char *powerbufFree = NULL;
620 unsigned char *powerbuf = NULL;
622 #if defined(SPARC_T4_MONT)
631 ERR_raise(ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS);
637 if (top > BN_CONSTTIME_SIZE_LIMIT) {
638 /* Prevent overflowing the powerbufLen computation below */
639 return BN_mod_exp_mont(rr, a, p, m, ctx, in_mont);
643 * Use all bits stored in |p|, rather than |BN_num_bits|, so we do not leak
644 * whether the top bits are zero.
646 bits = p->top * BN_BITS2;
648 /* x**0 mod 1, or x**0 mod -1 is still zero. */
649 if (BN_abs_is_word(m, 1)) {
661 * Allocate a montgomery context if it was not supplied by the caller. If
662 * this is not done, things will break in the montgomery part.
667 if ((mont = BN_MONT_CTX_new()) == NULL)
669 if (!BN_MONT_CTX_set(mont, m, ctx))
/* Reduce |a| into [0, m) when negative or >= m. */
673 if (a->neg || BN_ucmp(a, m) >= 0) {
674 BIGNUM *reduced = BN_CTX_get(ctx);
676 || !BN_nnmod(reduced, a, m, ctx)) {
684 * If the size of the operands allow it, perform the optimized
685 * RSAZ exponentiation. For further information see
686 * crypto/bn/rsaz_exp.c and accompanying assembly modules.
688 if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024)
689 && rsaz_avx2_eligible()) {
690 if (NULL == bn_wexpand(rr, 16))
692 RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d,
699 } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) {
700 if (NULL == bn_wexpand(rr, 8))
702 RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d);
711 /* Get the window size to use with size of p. */
712 window = BN_window_bits_for_ctime_exponent_size(bits);
713 #if defined(SPARC_T4_MONT)
714 if (window >= 5 && (top & 15) == 0 && top <= 64 &&
715 (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) ==
716 (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0]))
720 #if defined(OPENSSL_BN_ASM_MONT5)
721 if (window >= 5 && top <= BN_SOFT_LIMIT) {
722 window = 5; /* ~5% improvement for RSA2048 sign, and even
724 /* reserve space for mont->N.d[] copy */
725 powerbufLen += top * sizeof(mont->N.d[0]);
731 * Allocate a buffer large enough to hold all of the pre-computed powers
732 * of am, am itself and tmp.
734 numPowers = 1 << window;
735 powerbufLen += sizeof(m->d[0]) * (top * numPowers +
737 numPowers ? (2 * top) : numPowers));
/* Small tables live on the stack (alloca); larger ones on the heap. */
739 if (powerbufLen < 3072)
741 alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
745 OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH))
749 powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
750 memset(powerbuf, 0, powerbufLen);
753 if (powerbufLen < 3072)
757 /* lay down tmp and am right after powers table */
758 tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers);
760 tmp.top = am.top = 0;
761 tmp.dmax = am.dmax = top;
762 tmp.neg = am.neg = 0;
763 tmp.flags = am.flags = BN_FLG_STATIC_DATA;
765 /* prepare a^0 in Montgomery domain */
766 #if 1 /* by Shay Gueron's suggestion */
767 if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
768 /* 2^(top*BN_BITS2) - m */
769 tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
770 for (i = 1; i < top; i++)
771 tmp.d[i] = (~m->d[i]) & BN_MASK2;
775 if (!bn_to_mont_fixed_top(&tmp, BN_value_one(), mont, ctx))
778 /* prepare a^1 in Montgomery domain */
779 if (!bn_to_mont_fixed_top(&am, a, mont, ctx))
782 if (top > BN_SOFT_LIMIT)
785 #if defined(SPARC_T4_MONT)
/* SPARC T4 path: local prototypes for the per-size assembly workers. */
787 typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np,
788 const BN_ULONG *n0, const void *table,
789 int power, int bits);
790 int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np,
791 const BN_ULONG *n0, const void *table,
792 int power, int bits);
793 int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np,
794 const BN_ULONG *n0, const void *table,
795 int power, int bits);
796 int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np,
797 const BN_ULONG *n0, const void *table,
798 int power, int bits);
799 int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np,
800 const BN_ULONG *n0, const void *table,
801 int power, int bits);
802 static const bn_pwr5_mont_f pwr5_funcs[4] = {
803 bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16,
804 bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32
806 bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1];
808 typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap,
809 const void *bp, const BN_ULONG *np,
811 int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp,
812 const BN_ULONG *np, const BN_ULONG *n0);
813 int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap,
814 const void *bp, const BN_ULONG *np,
816 int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap,
817 const void *bp, const BN_ULONG *np,
819 int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap,
820 const void *bp, const BN_ULONG *np,
822 static const bn_mul_mont_f mul_funcs[4] = {
823 bn_mul_mont_t4_8, bn_mul_mont_t4_16,
824 bn_mul_mont_t4_24, bn_mul_mont_t4_32
826 bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1];
828 void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap,
829 const void *bp, const BN_ULONG *np,
830 const BN_ULONG *n0, int num);
831 void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap,
832 const void *bp, const BN_ULONG *np,
833 const BN_ULONG *n0, int num);
834 void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap,
835 const void *table, const BN_ULONG *np,
836 const BN_ULONG *n0, int num, int power);
837 void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num,
838 void *table, size_t power);
839 void bn_gather5_t4(BN_ULONG *out, size_t num,
840 void *table, size_t power);
841 void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num);
843 BN_ULONG *np = mont->N.d, *n0 = mont->n0;
844 int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less
848 * BN_to_montgomery can contaminate words above .top [in
851 for (i = am.top; i < top; i++)
853 for (i = tmp.top; i < top; i++)
856 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0);
857 bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1);
858 if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) &&
859 !(*mul_worker) (tmp.d, am.d, am.d, np, n0))
860 bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top);
861 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2);
863 for (i = 3; i < 32; i++) {
864 /* Calculate a^i = a^(i-1) * a */
865 if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) &&
866 !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0))
867 bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top);
868 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i);
871 /* switch to 64-bit domain */
872 np = alloca(top * sizeof(BN_ULONG));
874 bn_flip_t4(np, mont->N.d, top);
877 * The exponent may not have a whole number of fixed-size windows.
878 * To simplify the main loop, the initial window has between 1 and
879 * full-window-size bits such that what remains is always a whole
882 window0 = (bits - 1) % 5 + 1;
883 wmask = (1 << window0) - 1;
885 wvalue = bn_get_bits(p, bits) & wmask;
886 bn_gather5_t4(tmp.d, top, powerbuf, wvalue);
889 * Scan the exponent one window at a time starting from the most
896 wvalue = bn_get_bits(p, bits);
898 if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
900 /* retry once and fall back */
901 if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
905 wvalue >>= stride - 5;
/* Fallback: 5 squarings plus one gather-multiply per 5-bit window. */
907 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
908 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
909 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
910 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
911 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
912 bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top,
916 bn_flip_t4(tmp.d, tmp.d, top);
918 /* back to 32-bit domain */
920 bn_correct_top(&tmp);
921 OPENSSL_cleanse(np, top * sizeof(BN_ULONG));
924 #if defined(OPENSSL_BN_ASM_MONT5)
925 if (window == 5 && top > 1) {
927 * This optimization uses ideas from https://eprint.iacr.org/2011/239,
928 * specifically optimization of cache-timing attack countermeasures,
929 * pre-computation optimization, and Almost Montgomery Multiplication.
931 * The paper discusses a 4-bit window to optimize 512-bit modular
932 * exponentiation, used in RSA-1024 with CRT, but RSA-1024 is no longer
935 * |bn_mul_mont_gather5| and |bn_power5| implement the "almost"
936 * reduction variant, so the values here may not be fully reduced.
937 * They are bounded by R (i.e. they fit in |top| words), not |m|.
938 * Additionally, we pass these "almost" reduced inputs into
939 * |bn_mul_mont|, which implements the normal reduction variant.
940 * Given those inputs, |bn_mul_mont| may not give reduced
941 * output, but it will still produce "almost" reduced output.
943 void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap,
944 const void *table, const BN_ULONG *np,
945 const BN_ULONG *n0, int num, int power);
946 void bn_scatter5(const BN_ULONG *inp, size_t num,
947 void *table, size_t power);
948 void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power);
949 void bn_power5(BN_ULONG *rp, const BN_ULONG *ap,
950 const void *table, const BN_ULONG *np,
951 const BN_ULONG *n0, int num, int power);
952 int bn_get_bits5(const BN_ULONG *ap, int off);
954 BN_ULONG *n0 = mont->n0, *np;
957 * BN_to_montgomery can contaminate words above .top [in
960 for (i = am.top; i < top; i++)
962 for (i = tmp.top; i < top; i++)
966 * copy mont->N.d[] to improve cache locality
968 for (np = am.d + top, i = 0; i < top; i++)
969 np[i] = mont->N.d[i];
971 bn_scatter5(tmp.d, top, powerbuf, 0);
972 bn_scatter5(am.d, am.top, powerbuf, 1);
973 bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
974 bn_scatter5(tmp.d, top, powerbuf, 2);
977 for (i = 3; i < 32; i++) {
978 /* Calculate a^i = a^(i-1) * a */
979 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
980 bn_scatter5(tmp.d, top, powerbuf, i);
983 /* same as above, but uses squaring for 1/2 of operations */
984 for (i = 4; i < 32; i *= 2) {
985 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
986 bn_scatter5(tmp.d, top, powerbuf, i);
988 for (i = 3; i < 8; i += 2) {
990 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
991 bn_scatter5(tmp.d, top, powerbuf, i);
992 for (j = 2 * i; j < 32; j *= 2) {
993 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
994 bn_scatter5(tmp.d, top, powerbuf, j);
997 for (; i < 16; i += 2) {
998 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
999 bn_scatter5(tmp.d, top, powerbuf, i);
1000 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
1001 bn_scatter5(tmp.d, top, powerbuf, 2 * i);
1003 for (; i < 32; i += 2) {
1004 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
1005 bn_scatter5(tmp.d, top, powerbuf, i);
1009 * The exponent may not have a whole number of fixed-size windows.
1010 * To simplify the main loop, the initial window has between 1 and
1011 * full-window-size bits such that what remains is always a whole
1014 window0 = (bits - 1) % 5 + 1;
1015 wmask = (1 << window0) - 1;
1017 wvalue = bn_get_bits(p, bits) & wmask;
1018 bn_gather5(tmp.d, top, powerbuf, wvalue);
1021 * Scan the exponent one window at a time starting from the most
1026 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
1027 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
1028 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
1029 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
1030 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
1031 bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top,
1032 bn_get_bits5(p->d, bits -= 5));
1036 bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top,
1037 bn_get_bits5(p->d, bits -= 5));
1043 * The result is now in |tmp| in Montgomery form, but it may not be
1044 * fully reduced. This is within bounds for |BN_from_montgomery|
1045 * (tmp < R <= m*R) so it will, when converting from Montgomery form,
1046 * produce a fully reduced result.
1048 * This differs from Figure 2 of the paper, which uses AMM(h, 1) to
1049 * convert from Montgomery form with unreduced output, followed by an
1050 * extra reduction step. In the paper's terminology, we replace
1051 * steps 9 and 10 with MM(h, 1).
/* Generic path: build the table via the constant-time copy helpers. */
1057 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window))
1059 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, window))
1063 * If the window size is greater than 1, then calculate
1064 * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even
1065 * powers could instead be computed as (a^(i/2))^2 to use the slight
1066 * performance advantage of sqr over mul).
1069 if (!bn_mul_mont_fixed_top(&tmp, &am, &am, mont, ctx))
1071 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2,
1074 for (i = 3; i < numPowers; i++) {
1075 /* Calculate a^i = a^(i-1) * a */
1076 if (!bn_mul_mont_fixed_top(&tmp, &am, &tmp, mont, ctx))
1078 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i,
1085 * The exponent may not have a whole number of fixed-size windows.
1086 * To simplify the main loop, the initial window has between 1 and
1087 * full-window-size bits such that what remains is always a whole
1090 window0 = (bits - 1) % window + 1;
1091 wmask = (1 << window0) - 1;
1093 wvalue = bn_get_bits(p, bits) & wmask;
1094 if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp, top, powerbuf, wvalue,
1098 wmask = (1 << window) - 1;
1100 * Scan the exponent one window at a time starting from the most
1105 /* Square the result window-size times */
1106 for (i = 0; i < window; i++)
1107 if (!bn_mul_mont_fixed_top(&tmp, &tmp, &tmp, mont, ctx))
1111 * Get a window's worth of bits from the exponent
1112 * This avoids calling BN_is_bit_set for each bit, which
1113 * is not only slower but also makes each bit vulnerable to
1114 * EM (and likely other) side-channel attacks like One&Done
1115 * (for details see "One&Done: A Single-Decryption EM-Based
1116 * Attack on OpenSSL's Constant-Time Blinded RSA" by M. Alam,
1117 * H. Khan, M. Dey, N. Sinha, R. Callan, A. Zajic, and
1118 * M. Prvulovic, in USENIX Security'18)
1121 wvalue = bn_get_bits(p, bits) & wmask;
1123 * Fetch the appropriate pre-computed value from the pre-buf
1125 if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue,
1129 /* Multiply the result into the intermediate result */
1130 if (!bn_mul_mont_fixed_top(&tmp, &tmp, &am, mont, ctx))
1136 * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
1137 * removes padding [if any] and makes return value suitable for public
1140 #if defined(SPARC_T4_MONT)
1141 if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
1142 am.d[0] = 1; /* borrow am */
1143 for (i = 1; i < top; i++)
1145 if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx))
1149 if (!BN_from_montgomery(rr, &tmp, mont, ctx))
/* Cleanup: free caller-independent mont ctx and wipe the secret table. */
1153 if (in_mont == NULL)
1154 BN_MONT_CTX_free(mont);
1155 if (powerbuf != NULL) {
1156 OPENSSL_cleanse(powerbuf, powerbufLen);
1157 OPENSSL_free(powerbufFree);
/*
 * BN_mod_exp_mont_word(): rr = a^p mod m for a single-word base |a| and
 * odd |m|.  Accumulates the result as a product r*w, keeping the word
 * factor w as long as it does not overflow, to delay full BIGNUM
 * multiplications.  Not constant time; consttime-flagged operands are
 * rejected.
 * NOTE(review): this extract has gaps (non-contiguous original line
 * numbers); several declarations, braces and returns are not visible.
 */
1163 int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
1164 const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
1166 BN_MONT_CTX *mont = NULL;
1167 int b, bits, ret = 0;
/* r := (r * w) mod m, swapping r and t so the result lands back in r. */
1172 #define BN_MOD_MUL_WORD(r, w, m) \
1173 (BN_mul_word(r, (w)) && \
1174 (/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \
1175 (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1))))
1177 * BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is
1178 * probably more overhead than always using BN_mod (which uses BN_copy if
1179 * a similar test returns true).
1182 * We can use BN_mod and do not need BN_nnmod because our accumulator is
1183 * never negative (the result of BN_mod does not depend on the sign of
/* r := w converted into the Montgomery domain. */
1186 #define BN_TO_MONTGOMERY_WORD(r, w, mont) \
1187 (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx))
1189 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
1190 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
1191 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
1192 ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
1199 if (!BN_is_odd(m)) {
1200 ERR_raise(ERR_LIB_BN, BN_R_CALLED_WITH_EVEN_MODULUS);
1204 a %= m->d[0]; /* make sure that 'a' is reduced */
1206 bits = BN_num_bits(p);
1208 /* x**0 mod 1, or x**0 mod -1 is still zero. */
1209 if (BN_abs_is_word(m, 1)) {
1224 r = BN_CTX_get(ctx);
1225 t = BN_CTX_get(ctx);
1229 if (in_mont != NULL)
1232 if ((mont = BN_MONT_CTX_new()) == NULL)
1234 if (!BN_MONT_CTX_set(mont, m, ctx))
1238 r_is_one = 1; /* except for Montgomery factor */
1242 /* The result is accumulated in the product r*w. */
1243 w = a; /* bit 'bits-1' of 'p' is always set */
1244 for (b = bits - 2; b >= 0; b--) {
1245 /* First, square r*w. */
/* If w*w would overflow a word, fold w into the BIGNUM r first. */
1247 if ((next_w / w) != w) { /* overflow */
1249 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1253 if (!BN_MOD_MUL_WORD(r, w, m))
1260 if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
1264 /* Second, multiply r*w by 'a' if exponent bit is set. */
1265 if (BN_is_bit_set(p, b)) {
1267 if ((next_w / a) != w) { /* overflow */
1269 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1273 if (!BN_MOD_MUL_WORD(r, w, m))
1282 /* Finally, set r:=r*w. */
1285 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1289 if (!BN_MOD_MUL_WORD(r, w, m))
1294 if (r_is_one) { /* can happen only if a == 1 */
1298 if (!BN_from_montgomery(rr, r, mont, ctx))
1303 if (in_mont == NULL)
1304 BN_MONT_CTX_free(mont);
1310 /* The old fallback, simple version :-) */
/*
 * BN_mod_exp_simple(): r = a^p mod m via plain sliding-window
 * exponentiation over BN_mod_mul() — the fallback used when neither the
 * Montgomery nor the reciprocal path applies.  Not constant time.
 * NOTE(review): this extract has gaps (non-contiguous original line
 * numbers); error labels, returns and some braces are not visible.
 */
1311 int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
1312 const BIGNUM *m, BN_CTX *ctx)
1314 int i, j, bits, ret = 0, wstart, wend, window;
1317 /* Table of variables obtained from 'ctx' */
1318 BIGNUM *val[TABLE_SIZE];
1320 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
1321 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
1322 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
1323 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
1324 ERR_raise(ERR_LIB_BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
1329 ERR_raise(ERR_LIB_BN, ERR_R_PASSED_INVALID_ARGUMENT);
1333 bits = BN_num_bits(p);
1335 /* x**0 mod 1, or x**0 mod -1 is still zero. */
1336 if (BN_abs_is_word(m, 1)) {
1346 d = BN_CTX_get(ctx);
1347 val[0] = BN_CTX_get(ctx);
1351 if (!BN_nnmod(val[0], a, m, ctx))
1353 if (BN_is_zero(val[0])) {
1359 window = BN_window_bits_for_exponent_size(bits);
/* Precompute the odd powers a^1, a^3, ..., a^(2^window - 1). */
1361 if (!BN_mod_mul(d, val[0], val[0], m, ctx))
1363 j = 1 << (window - 1);
1364 for (i = 1; i < j; i++) {
1365 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
1366 !BN_mod_mul(val[i], val[i - 1], d, m, ctx))
1371 start = 1; /* This is used to avoid multiplication etc
1372 * when there is only the value '1' in the
1374 wstart = bits - 1; /* The top bit of the window */
1375 wend = 0; /* The bottom bit of the window */
/* p_dup guards against |p| aliasing |r| while the loop reads p's bits. */
1378 BIGNUM *p_dup = BN_CTX_get(ctx);
1380 if (p_dup == NULL || BN_copy(p_dup, p) == NULL)
1389 int wvalue; /* The 'value' of the window */
1391 if (BN_is_bit_set(p, wstart) == 0) {
1393 if (!BN_mod_mul(r, r, r, m, ctx))
1401 * We now have wstart on a 'set' bit, we now need to work out how big
1402 * a window to do. To do this we need to scan forward until the last
1403 * set bit before the end of the window
1407 for (i = 1; i < window; i++) {
1410 if (BN_is_bit_set(p, wstart - i)) {
1411 wvalue <<= (i - wend);
1417 /* wend is the size of the current window */
1419 /* add the 'bytes above' */
1421 for (i = 0; i < j; i++) {
1422 if (!BN_mod_mul(r, r, r, m, ctx))
1426 /* wvalue will be an odd number < 2^window */
1427 if (!BN_mod_mul(r, r, val[wvalue >> 1], m, ctx))
1430 /* move the 'window' down further */
1444 * This is a variant of modular exponentiation optimization that does
1445 * parallel 2-primes exponentiation using 256-bit (AVX512VL) AVX512_IFMA ISA
1446 * in 52-bit binary redundant representation.
1447 * If such instructions are not available, or input data size is not supported,
1448 * it falls back to two BN_mod_exp_mont_consttime() calls.
/*
 * BN_mod_exp_mont_consttime_x2(): compute rr1 = a1^p1 mod m1 and
 * rr2 = a2^p2 mod m2 in one call.  When AVX512-IFMA is available and both
 * operand pairs are exactly 1024, 1536 or 2048 bits, uses the parallel
 * RSAZ x2 kernel; otherwise falls back to two sequential
 * BN_mod_exp_mont_consttime() calls.
 * NOTE(review): this extract has gaps (non-contiguous original line
 * numbers); 'topn' computation, error labels and returns are not visible.
 */
1450 int BN_mod_exp_mont_consttime_x2(BIGNUM *rr1, const BIGNUM *a1, const BIGNUM *p1,
1451 const BIGNUM *m1, BN_MONT_CTX *in_mont1,
1452 BIGNUM *rr2, const BIGNUM *a2, const BIGNUM *p2,
1453 const BIGNUM *m2, BN_MONT_CTX *in_mont2,
1459 BN_MONT_CTX *mont1 = NULL;
1460 BN_MONT_CTX *mont2 = NULL;
/* Fast path only when both pairs have identical, supported sizes. */
1462 if (ossl_rsaz_avx512ifma_eligible() &&
1463 (((a1->top == 16) && (p1->top == 16) && (BN_num_bits(m1) == 1024) &&
1464 (a2->top == 16) && (p2->top == 16) && (BN_num_bits(m2) == 1024)) ||
1465 ((a1->top == 24) && (p1->top == 24) && (BN_num_bits(m1) == 1536) &&
1466 (a2->top == 24) && (p2->top == 24) && (BN_num_bits(m2) == 1536)) ||
1467 ((a1->top == 32) && (p1->top == 32) && (BN_num_bits(m1) == 2048) &&
1468 (a2->top == 32) && (p2->top == 32) && (BN_num_bits(m2) == 2048)))) {
1471 /* Modulus bits of |m1| and |m2| are equal */
1472 int mod_bits = BN_num_bits(m1);
1474 if (bn_wexpand(rr1, topn) == NULL)
1476 if (bn_wexpand(rr2, topn) == NULL)
1479 /* Ensure that montgomery contexts are initialized */
1480 if (in_mont1 != NULL) {
1483 if ((mont1 = BN_MONT_CTX_new()) == NULL)
1485 if (!BN_MONT_CTX_set(mont1, m1, ctx))
1488 if (in_mont2 != NULL) {
1491 if ((mont2 = BN_MONT_CTX_new()) == NULL)
1493 if (!BN_MONT_CTX_set(mont2, m2, ctx))
1497 ret = ossl_rsaz_mod_exp_avx512_x2(rr1->d, a1->d, p1->d, m1->d,
1498 mont1->RR.d, mont1->n0[0],
1499 rr2->d, a2->d, p2->d, m2->d,
1500 mont2->RR.d, mont2->n0[0],
1505 bn_correct_top(rr1);
1510 bn_correct_top(rr2);
/* Generic fallback: two independent constant-time exponentiations. */
1517 /* rr1 = a1^p1 mod m1 */
1518 ret = BN_mod_exp_mont_consttime(rr1, a1, p1, m1, ctx, in_mont1);
1519 /* rr2 = a2^p2 mod m2 */
1520 ret &= BN_mod_exp_mont_consttime(rr2, a2, p2, m2, ctx, in_mont2);
/* Free only the contexts allocated locally (not caller-supplied ones). */
1524 if (in_mont2 == NULL)
1525 BN_MONT_CTX_free(mont2);
1526 if (in_mont1 == NULL)
1527 BN_MONT_CTX_free(mont1);