/*
 * Copyright 1995-2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
/*
 * RSA low level APIs are deprecated for public use, but still ok for
 * internal use.
 */
14 #include "internal/deprecated.h"
16 #include "internal/cryptlib.h"
17 #include "crypto/bn.h"
18 #include "rsa_local.h"
19 #include "internal/constant_time.h"
20 #include <openssl/evp.h>
21 #include <openssl/sha.h>
22 #include <openssl/hmac.h>
/*
 * Forward declarations for the callbacks that make up the default
 * (software) RSA_METHOD implemented in this file.
 */
24 static int rsa_ossl_public_encrypt(int flen, const unsigned char *from,
25 unsigned char *to, RSA *rsa, int padding);
26 static int rsa_ossl_private_encrypt(int flen, const unsigned char *from,
27 unsigned char *to, RSA *rsa, int padding);
28 static int rsa_ossl_public_decrypt(int flen, const unsigned char *from,
29 unsigned char *to, RSA *rsa, int padding);
30 static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,
31 unsigned char *to, RSA *rsa, int padding);
/* NOTE(review): the trailing BN_CTX *ctx parameter line of this
 * prototype appears to be missing from the visible text — confirm
 * against the upstream file. */
32 static int rsa_ossl_mod_exp(BIGNUM *r0, const BIGNUM *i, RSA *rsa,
34 static int rsa_ossl_init(RSA *rsa);
35 static int rsa_ossl_finish(RSA *rsa);
/*
 * The default RSA_METHOD: plain software implementations of the public
 * and private key operations, using BN_mod_exp_mont for the core
 * modular exponentiation.
 * NOTE(review): several initializer fields (method name, rsa_mod_exp,
 * init/finish callbacks, app_data) appear to be missing from the
 * visible text — confirm against the upstream file.
 */
36 static RSA_METHOD rsa_pkcs1_ossl_meth = {
38 rsa_ossl_public_encrypt,
39 rsa_ossl_public_decrypt, /* signature verification */
40 rsa_ossl_private_encrypt, /* signing */
41 rsa_ossl_private_decrypt,
43 BN_mod_exp_mont, /* XXX probably we should not use Montgomery
47 RSA_FLAG_FIPS_METHOD, /* flags */
51 NULL, /* rsa_keygen */
52 NULL /* rsa_multi_prime_keygen */
55 static const RSA_METHOD *default_RSA_meth = &rsa_pkcs1_ossl_meth;
57 void RSA_set_default_method(const RSA_METHOD *meth)
59 default_RSA_meth = meth;
62 const RSA_METHOD *RSA_get_default_method(void)
64 return default_RSA_meth;
67 const RSA_METHOD *RSA_PKCS1_OpenSSL(void)
69 return &rsa_pkcs1_ossl_meth;
72 const RSA_METHOD *RSA_null_method(void)
/*
 * RSA public-key encryption: pad the flen-byte input |from| according to
 * |padding|, convert it to a BIGNUM, compute m^e mod n, and write the
 * num-byte big-endian result to |to|.  Returns the number of bytes
 * written (the modulus size) or -1 on error.
 * NOTE(review): this block appears truncated — variable declarations,
 * BN_CTX_start, several closing braces, the err: label and the final
 * return are missing from the visible text.
 */
77 static int rsa_ossl_public_encrypt(int flen, const unsigned char *from,
78 unsigned char *to, RSA *rsa, int padding)
81 int i, num = 0, r = -1;
82 unsigned char *buf = NULL;
/* refuse absurdly large moduli (DoS protection) */
85 if (BN_num_bits(rsa->n) > OPENSSL_RSA_MAX_MODULUS_BITS) {
86 ERR_raise(ERR_LIB_RSA, RSA_R_MODULUS_TOO_LARGE);
/* the public exponent must be smaller than the modulus */
90 if (BN_ucmp(rsa->n, rsa->e) <= 0) {
91 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
95 /* for large moduli, enforce exponent limit */
96 if (BN_num_bits(rsa->n) > OPENSSL_RSA_SMALL_MODULUS_BITS) {
97 if (BN_num_bits(rsa->e) > OPENSSL_RSA_MAX_PUBEXP_BITS) {
98 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
103 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
107 ret = BN_CTX_get(ctx);
108 num = BN_num_bytes(rsa->n);
109 buf = OPENSSL_malloc(num);
110 if (ret == NULL || buf == NULL)
/* apply the requested padding scheme into the num-byte buffer */
114 case RSA_PKCS1_PADDING:
115 i = ossl_rsa_padding_add_PKCS1_type_2_ex(rsa->libctx, buf, num,
118 case RSA_PKCS1_OAEP_PADDING:
119 i = ossl_rsa_padding_add_PKCS1_OAEP_mgf1_ex(rsa->libctx, buf, num,
124 i = RSA_padding_add_none(buf, num, from, flen);
127 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
/* convert the padded block into a BIGNUM for the modular exponentiation */
133 if (BN_bin2bn(buf, num, f) == NULL)
136 if (BN_ucmp(f, rsa->n) >= 0) {
137 /* usually the padding functions would catch this */
138 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
/* set up (and cache) the Montgomery context for n when caching is enabled */
142 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
143 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
/* the core public-key operation: ret = f^e mod n */
147 if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx,
152 * BN_bn2binpad puts in leading 0 bytes if the number is less than
153 * the length of the modulus.
155 r = BN_bn2binpad(ret, to, num);
/* scrub the padded plaintext before freeing it */
159 OPENSSL_clear_free(buf, num);
/*
 * Obtain a BN_BLINDING structure for this key, creating it lazily under
 * the key's write lock.  On return, *local == 1 means the returned
 * blinding (rsa->blinding) belongs to the calling thread and may be
 * used without further locking; *local == 0 means the shared
 * rsa->mt_blinding is returned and accesses must be serialised, with
 * the unblinding factor stored outside the BN_BLINDING.
 * NOTE(review): several lines (declarations, *local assignments, error
 * paths, the final return) are missing from the visible text.
 */
163 static BN_BLINDING *rsa_get_blinding(RSA *rsa, int *local, BN_CTX *ctx)
167 if (!CRYPTO_THREAD_write_lock(rsa->lock))
/* lazily create the per-key blinding on first use */
170 if (rsa->blinding == NULL) {
171 rsa->blinding = RSA_setup_blinding(rsa, ctx);
178 if (BN_BLINDING_is_current_thread(ret)) {
179 /* rsa->blinding is ours! */
183 /* resort to rsa->mt_blinding instead */
186 * instructs rsa_blinding_convert(), rsa_blinding_invert() that the
187 * BN_BLINDING is shared, meaning that accesses require locks, and
188 * that the blinding factor must be stored outside the BN_BLINDING
192 if (rsa->mt_blinding == NULL) {
193 rsa->mt_blinding = RSA_setup_blinding(rsa, ctx);
195 ret = rsa->mt_blinding;
199 CRYPTO_THREAD_unlock(rsa->lock);
203 static int rsa_blinding_convert(BN_BLINDING *b, BIGNUM *f, BIGNUM *unblind,
206 if (unblind == NULL) {
208 * Local blinding: store the unblinding factor in BN_BLINDING.
210 return BN_BLINDING_convert_ex(f, NULL, b, ctx);
213 * Shared blinding: store the unblinding factor outside BN_BLINDING.
217 if (!BN_BLINDING_lock(b))
220 ret = BN_BLINDING_convert_ex(f, unblind, b, ctx);
221 BN_BLINDING_unlock(b);
227 static int rsa_blinding_invert(BN_BLINDING *b, BIGNUM *f, BIGNUM *unblind,
231 * For local blinding, unblind is set to NULL, and BN_BLINDING_invert_ex
232 * will use the unblinding factor stored in BN_BLINDING. If BN_BLINDING
233 * is shared between threads, unblind must be non-null:
234 * BN_BLINDING_invert_ex will then use the local unblinding factor, and
235 * will only read the modulus from BN_BLINDING. In both cases it's safe
236 * to access the blinding without a lock.
238 return BN_BLINDING_invert_ex(f, unblind, b, ctx);
/*
 * RSA private-key encryption (signing): pad |from|, blind the padded
 * value, compute m^d mod n (via CRT when the full private key is
 * available), unblind, and write the result to |to|.  For X9.31 padding
 * the smaller of (result, n - result) is emitted.  Returns the number
 * of bytes written or -1 on error.
 * NOTE(review): this block appears truncated — declarations, switch
 * statement header, closing braces, the err: label and final return are
 * missing from the visible text.
 */
242 static int rsa_ossl_private_encrypt(int flen, const unsigned char *from,
243 unsigned char *to, RSA *rsa, int padding)
245 BIGNUM *f, *ret, *res;
246 int i, num = 0, r = -1;
247 unsigned char *buf = NULL;
249 int local_blinding = 0;
251 * Used only if the blinding structure is shared. A non-NULL unblind
252 * instructs rsa_blinding_convert() and rsa_blinding_invert() to store
253 * the unblinding factor outside the blinding structure.
255 BIGNUM *unblind = NULL;
256 BN_BLINDING *blinding = NULL;
258 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
262 ret = BN_CTX_get(ctx);
263 num = BN_num_bytes(rsa->n);
264 buf = OPENSSL_malloc(num);
265 if (ret == NULL || buf == NULL)
/* signature paddings: type-1 PKCS#1, X9.31, or none */
269 case RSA_PKCS1_PADDING:
270 i = RSA_padding_add_PKCS1_type_1(buf, num, from, flen);
272 case RSA_X931_PADDING:
273 i = RSA_padding_add_X931(buf, num, from, flen);
276 i = RSA_padding_add_none(buf, num, from, flen);
279 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
285 if (BN_bin2bn(buf, num, f) == NULL)
288 if (BN_ucmp(f, rsa->n) >= 0) {
289 /* usually the padding functions would catch this */
290 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
294 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
295 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
/* blinding defends the private-key operation against timing attacks */
299 if (!(rsa->flags & RSA_FLAG_NO_BLINDING)) {
300 blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
301 if (blinding == NULL) {
302 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
307 if (blinding != NULL) {
308 if (!local_blinding && ((unblind = BN_CTX_get(ctx)) == NULL)) {
309 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
312 if (!rsa_blinding_convert(blinding, f, unblind, ctx))
/* use CRT (rsa_mod_exp) when the full set of CRT parameters is present */
316 if ((rsa->flags & RSA_FLAG_EXT_PKEY) ||
317 (rsa->version == RSA_ASN1_VERSION_MULTI) ||
320 (rsa->dmp1 != NULL) && (rsa->dmq1 != NULL) && (rsa->iqmp != NULL))) {
321 if (!rsa->meth->rsa_mod_exp(ret, f, rsa, ctx))
/* otherwise fall back to a plain m^d mod n with a constant-time d */
324 BIGNUM *d = BN_new();
326 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
329 if (rsa->d == NULL) {
330 ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
334 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
336 if (!rsa->meth->bn_mod_exp(ret, f, d, rsa->n, ctx,
337 rsa->_method_mod_n)) {
341 /* We MUST free d before any further use of rsa->d */
346 if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
/* X9.31: emit min(ret, n - ret) as required by the standard */
349 if (padding == RSA_X931_PADDING) {
350 if (!BN_sub(f, rsa->n, ret))
352 if (BN_cmp(ret, f) > 0)
361 * BN_bn2binpad puts in leading 0 bytes if the number is less than
362 * the length of the modulus.
364 r = BN_bn2binpad(res, to, num);
/* scrub the padded plaintext before freeing it */
368 OPENSSL_clear_free(buf, num);
/*
 * RSA private-key decryption: convert the ciphertext to a BIGNUM, blind
 * it, compute c^d mod n (via CRT when available), unblind, then run the
 * padding check.  For RSA_PKCS1_PADDING this implements "implicit
 * rejection" (a Bleichenbacher countermeasure): a key-derivation key
 * (KDK) is computed as HMAC-SHA256 over the ciphertext, keyed with
 * SHA256(d), so that padding failures yield a deterministic synthetic
 * message instead of an observable error.  Returns the plaintext length
 * or -1 on error, in constant time with respect to padding validity.
 * NOTE(review): this block appears truncated — declarations, several
 * closing braces, the switch header, err: label and final return are
 * missing from the visible text.
 */
372 static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,
373 unsigned char *to, RSA *rsa, int padding)
376 int j, num = 0, r = -1;
377 unsigned char *buf = NULL;
378 unsigned char d_hash[SHA256_DIGEST_LENGTH] = {0};
379 HMAC_CTX *hmac = NULL;
380 unsigned int md_len = SHA256_DIGEST_LENGTH;
381 unsigned char kdk[SHA256_DIGEST_LENGTH] = {0};
383 int local_blinding = 0;
386 * Used only if the blinding structure is shared. A non-NULL unblind
387 * instructs rsa_blinding_convert() and rsa_blinding_invert() to store
388 * the unblinding factor outside the blinding structure.
390 BIGNUM *unblind = NULL;
391 BN_BLINDING *blinding = NULL;
/* without access to d (external key) implicit rejection is impossible */
394 * we need the value of the private exponent to perform implicit rejection
396 if ((rsa->flags & RSA_FLAG_EXT_PKEY) && (padding == RSA_PKCS1_PADDING))
397 padding = RSA_PKCS1_NO_IMPLICIT_REJECT_PADDING;
399 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
403 ret = BN_CTX_get(ctx);
405 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
408 num = BN_num_bytes(rsa->n);
409 buf = OPENSSL_malloc(num);
/* tolerate ciphertexts shorter than the modulus (PGP strips leading zeros) */
414 * This check was for equality but PGP does evil things and chops off the
418 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_GREATER_THAN_MOD_LEN);
423 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_SMALL);
427 /* make data into a big number */
428 if (BN_bin2bn(from, (int)flen, f) == NULL)
431 if (BN_ucmp(f, rsa->n) >= 0) {
432 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
/* blinding defends the private-key operation against timing attacks */
436 if (!(rsa->flags & RSA_FLAG_NO_BLINDING)) {
437 blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
438 if (blinding == NULL) {
439 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
444 if (blinding != NULL) {
445 if (!local_blinding && ((unblind = BN_CTX_get(ctx)) == NULL)) {
446 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
449 if (!rsa_blinding_convert(blinding, f, unblind, ctx))
/* use CRT (rsa_mod_exp) when the full set of CRT parameters is present */
454 if ((rsa->flags & RSA_FLAG_EXT_PKEY) ||
455 (rsa->version == RSA_ASN1_VERSION_MULTI) ||
458 (rsa->dmp1 != NULL) && (rsa->dmq1 != NULL) && (rsa->iqmp != NULL))) {
459 if (!rsa->meth->rsa_mod_exp(ret, f, rsa, ctx))
/* otherwise fall back to a plain c^d mod n with a constant-time d */
462 BIGNUM *d = BN_new();
464 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
467 if (rsa->d == NULL) {
468 ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
472 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
474 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
475 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
480 if (!rsa->meth->bn_mod_exp(ret, f, d, rsa->n, ctx,
481 rsa->_method_mod_n)) {
485 /* We MUST free d before any further use of rsa->d */
490 if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
494 * derive the Key Derivation Key from private exponent and public
497 if (padding == RSA_PKCS1_PADDING) {
499 * because we use d as a handle to rsa->d we need to keep it local and
500 * free before any further use of rsa->d
502 BIGNUM *d = BN_new();
504 ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
507 if (rsa->d == NULL) {
508 ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
512 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
513 if (BN_bn2binpad(d, buf, num) < 0) {
514 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
521 * we use hardcoded hash so that migrating between versions that use
522 * different hash doesn't provide a Bleichenbacher oracle:
523 * if the attacker can see that different versions return different
524 * messages for the same ciphertext, they'll know that the message is
525 * syntethically generated, which means that the padding check failed
527 md = EVP_MD_fetch(rsa->libctx, "sha256", NULL);
529 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
/* KDK key = SHA256(d) */
533 if (EVP_Digest(buf, num, d_hash, NULL, md, NULL) <= 0) {
534 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
538 hmac = HMAC_CTX_new();
540 ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
544 if (HMAC_Init_ex(hmac, d_hash, sizeof(d_hash), md, NULL) <= 0) {
545 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
/* HMAC over the ciphertext, left-padded with zeros to the modulus size */
550 memset(buf, 0, num - flen);
551 if (HMAC_Update(hmac, buf, num - flen) <= 0) {
552 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
556 if (HMAC_Update(hmac, from, flen) <= 0) {
557 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
561 md_len = SHA256_DIGEST_LENGTH;
562 if (HMAC_Final(hmac, kdk, &md_len) <= 0) {
563 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
/* serialise the decrypted value and run the padding check */
568 j = BN_bn2binpad(ret, buf, num);
573 case RSA_PKCS1_NO_IMPLICIT_REJECT_PADDING:
574 r = RSA_padding_check_PKCS1_type_2(to, num, buf, j, num);
576 case RSA_PKCS1_PADDING:
577 r = ossl_rsa_padding_check_PKCS1_type_2(rsa->libctx, to, num, buf, j, num, kdk);
579 case RSA_PKCS1_OAEP_PADDING:
580 r = RSA_padding_check_PKCS1_OAEP(to, num, buf, j, num, NULL, 0);
583 memcpy(to, buf, (r = j));
586 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
591 * This trick doesn't work in the FIPS provider because libcrypto manages
592 * the error stack. Instead we opt not to put an error on the stack at all
593 * in case of padding failure in the FIPS provider.
595 ERR_raise(ERR_LIB_RSA, RSA_R_PADDING_CHECK_FAILED);
/* constant-time removal of the error when the padding check succeeded */
596 err_clear_last_constant_time(1 & ~constant_time_msb(r));
604 OPENSSL_clear_free(buf, num);
608 /* signature verification */
/*
 * RSA public-key decryption (signature verification): compute s^e mod n
 * and run the padding check for |padding|.  Returns the recovered data
 * length or -1 on error.
 * NOTE(review): this block appears truncated — declarations, closing
 * braces, the switch header, err: label and final return are missing
 * from the visible text.
 */
609 static int rsa_ossl_public_decrypt(int flen, const unsigned char *from,
610 unsigned char *to, RSA *rsa, int padding)
613 int i, num = 0, r = -1;
614 unsigned char *buf = NULL;
/* refuse absurdly large moduli (DoS protection) */
617 if (BN_num_bits(rsa->n) > OPENSSL_RSA_MAX_MODULUS_BITS) {
618 ERR_raise(ERR_LIB_RSA, RSA_R_MODULUS_TOO_LARGE);
/* the public exponent must be smaller than the modulus */
622 if (BN_ucmp(rsa->n, rsa->e) <= 0) {
623 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
627 /* for large moduli, enforce exponent limit */
628 if (BN_num_bits(rsa->n) > OPENSSL_RSA_SMALL_MODULUS_BITS) {
629 if (BN_num_bits(rsa->e) > OPENSSL_RSA_MAX_PUBEXP_BITS) {
630 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
635 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
639 ret = BN_CTX_get(ctx);
641 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
644 num = BN_num_bytes(rsa->n);
645 buf = OPENSSL_malloc(num);
/* tolerate signatures shorter than the modulus (PGP strips leading zeros) */
650 * This check was for equality but PGP does evil things and chops off the
654 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_GREATER_THAN_MOD_LEN);
658 if (BN_bin2bn(from, flen, f) == NULL)
661 if (BN_ucmp(f, rsa->n) >= 0) {
662 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
666 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
667 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
/* the core operation: ret = f^e mod n */
671 if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx,
/* X9.31: if the result lacks the 0x..C trailer, use n - ret instead */
675 if ((padding == RSA_X931_PADDING) && ((bn_get_words(ret)[0] & 0xf) != 12))
676 if (!BN_sub(ret, rsa->n, ret))
679 i = BN_bn2binpad(ret, buf, num);
684 case RSA_PKCS1_PADDING:
685 r = RSA_padding_check_PKCS1_type_1(to, num, buf, i, num);
687 case RSA_X931_PADDING:
688 r = RSA_padding_check_X931(to, num, buf, i, num);
691 memcpy(to, buf, (r = i));
694 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
698 ERR_raise(ERR_LIB_RSA, RSA_R_PADDING_CHECK_FAILED);
703 OPENSSL_clear_free(buf, num);
/*
 * CRT private-key exponentiation: r0 = I^d mod n computed via the
 * Chinese Remainder Theorem using p, q, dmp1, dmq1 and iqmp (plus extra
 * primes for multi-prime keys).  When the key and method allow it (the
 * "smooth" path), the whole computation is done with constant-time
 * fixed-top Montgomery primitives; otherwise it falls back to generic
 * BN arithmetic with BN_FLG_CONSTTIME exponents.  The result is
 * verified against the public exponent and, on CRT mismatch (possible
 * fault attack), recomputed with a plain mod_exp.  Returns 1 on
 * success, 0 on error.
 * NOTE(review): this block is heavily truncated — declarations, braces,
 * goto labels, the smooth/non-smooth branch structure and the final
 * return are missing from the visible text.
 */
707 static int rsa_ossl_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx)
709 BIGNUM *r1, *m1, *vrfy;
710 int ret = 0, smooth = 0;
712 BIGNUM *r2, *m[RSA_MAX_PRIME_NUM - 2];
713 int i, ex_primes = 0;
714 RSA_PRIME_INFO *pinfo;
719 r1 = BN_CTX_get(ctx);
721 r2 = BN_CTX_get(ctx);
723 m1 = BN_CTX_get(ctx);
724 vrfy = BN_CTX_get(ctx);
/* multi-prime keys must carry a sane number of extra primes */
729 if (rsa->version == RSA_ASN1_VERSION_MULTI
730 && ((ex_primes = sk_RSA_PRIME_INFO_num(rsa->prime_infos)) <= 0
731 || ex_primes > RSA_MAX_PRIME_NUM - 2))
/* set up cached Montgomery contexts for p, q (and extra primes) */
735 if (rsa->flags & RSA_FLAG_CACHE_PRIVATE) {
736 BIGNUM *factor = BN_new();
742 * Make sure BN_mod_inverse in Montgomery initialization uses the
743 * BN_FLG_CONSTTIME flag
745 if (!(BN_with_flags(factor, rsa->p, BN_FLG_CONSTTIME),
746 BN_MONT_CTX_set_locked(&rsa->_method_mod_p, rsa->lock,
748 || !(BN_with_flags(factor, rsa->q, BN_FLG_CONSTTIME),
749 BN_MONT_CTX_set_locked(&rsa->_method_mod_q, rsa->lock,
755 for (i = 0; i < ex_primes; i++) {
756 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
757 BN_with_flags(factor, pinfo->r, BN_FLG_CONSTTIME);
758 if (!BN_MONT_CTX_set_locked(&pinfo->m, rsa->lock, factor, ctx)) {
765 * We MUST free |factor| before any further use of the prime factors
/* the fully constant-time path needs Montgomery exp and equal-width p, q */
769 smooth = (rsa->meth->bn_mod_exp == BN_mod_exp_mont)
773 && (BN_num_bits(rsa->q) == BN_num_bits(rsa->p));
776 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
777 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
783 * Conversion from Montgomery domain, a.k.a. Montgomery reduction,
784 * accepts values in [0-m*2^w) range. w is m's bit width rounded up
785 * to limb width. So that at the very least if |I| is fully reduced,
786 * i.e. less than p*q, we can count on from-to round to perform
787 * below modulo operations on |I|. Unlike BN_mod it's constant time.
789 if (/* m1 = I moq q */
790 !bn_from_mont_fixed_top(m1, I, rsa->_method_mod_q, ctx)
791 || !bn_to_mont_fixed_top(m1, m1, rsa->_method_mod_q, ctx)
793 || !bn_from_mont_fixed_top(r1, I, rsa->_method_mod_p, ctx)
794 || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
796 * Use parallel exponentiations optimization if possible,
797 * otherwise fallback to two sequential exponentiations:
801 || !BN_mod_exp_mont_consttime_x2(m1, m1, rsa->dmq1, rsa->q,
803 r1, r1, rsa->dmp1, rsa->p,
806 /* r1 = (r1 - m1) mod p */
808 * bn_mod_sub_fixed_top is not regular modular subtraction,
809 * it can tolerate subtrahend to be larger than modulus, but
810 * not bit-wise wider. This makes up for uncommon q>p case,
811 * when |m1| can be larger than |rsa->p|.
813 || !bn_mod_sub_fixed_top(r1, r1, m1, rsa->p)
815 /* r1 = r1 * iqmp mod p */
816 || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
817 || !bn_mul_mont_fixed_top(r1, r1, rsa->iqmp, rsa->_method_mod_p,
819 /* r0 = r1 * q + m1 */
820 || !bn_mul_fixed_top(r0, r1, rsa->q, ctx)
821 || !bn_mod_add_fixed_top(r0, r0, m1, rsa->n))
/* non-smooth fallback path: generic BN arithmetic, CONSTTIME exponents */
827 /* compute I mod q */
829 BIGNUM *c = BN_new();
832 BN_with_flags(c, I, BN_FLG_CONSTTIME);
834 if (!BN_mod(r1, c, rsa->q, ctx)) {
840 BIGNUM *dmq1 = BN_new();
845 BN_with_flags(dmq1, rsa->dmq1, BN_FLG_CONSTTIME);
847 /* compute r1^dmq1 mod q */
848 if (!rsa->meth->bn_mod_exp(m1, r1, dmq1, rsa->q, ctx,
849 rsa->_method_mod_q)) {
854 /* We MUST free dmq1 before any further use of rsa->dmq1 */
858 /* compute I mod p */
859 if (!BN_mod(r1, c, rsa->p, ctx)) {
863 /* We MUST free c before any further use of I */
868 BIGNUM *dmp1 = BN_new();
871 BN_with_flags(dmp1, rsa->dmp1, BN_FLG_CONSTTIME);
873 /* compute r1^dmp1 mod p */
874 if (!rsa->meth->bn_mod_exp(r0, r1, dmp1, rsa->p, ctx,
875 rsa->_method_mod_p)) {
879 /* We MUST free dmp1 before any further use of rsa->dmp1 */
/* multi-prime keys: compute I^d_i mod r_i for each extra prime */
885 BIGNUM *di = BN_new(), *cc = BN_new();
887 if (cc == NULL || di == NULL) {
893 for (i = 0; i < ex_primes; i++) {
895 if ((m[i] = BN_CTX_get(ctx)) == NULL) {
901 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
903 /* prepare c and d_i */
904 BN_with_flags(cc, I, BN_FLG_CONSTTIME);
905 BN_with_flags(di, pinfo->d, BN_FLG_CONSTTIME);
907 if (!BN_mod(r1, cc, pinfo->r, ctx)) {
912 /* compute r1 ^ d_i mod r_i */
913 if (!rsa->meth->bn_mod_exp(m[i], r1, di, pinfo->r, ctx, pinfo->m)) {
/* CRT recombination: r0 = m1 + q * ((r0 - m1) * iqmp mod p) */
925 if (!BN_sub(r0, r0, m1))
928 * This will help stop the size of r0 increasing, which does affect the
929 * multiply if it optimised for a power of 2 size
931 if (BN_is_negative(r0))
932 if (!BN_add(r0, r0, rsa->p))
935 if (!BN_mul(r1, r0, rsa->iqmp, ctx))
939 BIGNUM *pr1 = BN_new();
942 BN_with_flags(pr1, r1, BN_FLG_CONSTTIME);
944 if (!BN_mod(r0, pr1, rsa->p, ctx)) {
948 /* We MUST free pr1 before any further use of r1 */
953 * If p < q it is occasionally possible for the correction of adding 'p'
954 * if r0 is negative above to leave the result still negative. This can
955 * break the private key operations: the following second correction
956 * should *always* correct this rare occurrence. This will *never* happen
957 * with OpenSSL generated keys because they ensure p > q [steve]
959 if (BN_is_negative(r0))
960 if (!BN_add(r0, r0, rsa->p))
962 if (!BN_mul(r1, r0, rsa->q, ctx))
964 if (!BN_add(r0, r1, m1))
968 /* add m_i to m in multi-prime case */
970 BIGNUM *pr2 = BN_new();
975 for (i = 0; i < ex_primes; i++) {
976 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
977 if (!BN_sub(r1, m[i], r0)) {
982 if (!BN_mul(r2, r1, pinfo->t, ctx)) {
987 BN_with_flags(pr2, r2, BN_FLG_CONSTTIME);
989 if (!BN_mod(r1, pr2, pinfo->r, ctx)) {
994 if (BN_is_negative(r1))
995 if (!BN_add(r1, r1, pinfo->r)) {
999 if (!BN_mul(r1, r1, pinfo->pp, ctx)) {
1003 if (!BN_add(r0, r0, r1)) {
/* verify the CRT result against the public key to detect fault attacks */
1013 if (rsa->e && rsa->n) {
1014 if (rsa->meth->bn_mod_exp == BN_mod_exp_mont) {
1015 if (!BN_mod_exp_mont(vrfy, r0, rsa->e, rsa->n, ctx,
1016 rsa->_method_mod_n))
1020 if (!rsa->meth->bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx,
1021 rsa->_method_mod_n))
1025 * If 'I' was greater than (or equal to) rsa->n, the operation will
1026 * be equivalent to using 'I mod n'. However, the result of the
1027 * verify will *always* be less than 'n' so we don't check for
1028 * absolute equality, just congruency.
1030 if (!BN_sub(vrfy, vrfy, I))
1032 if (BN_is_zero(vrfy)) {
1035 goto err; /* not actually error */
1037 if (!BN_mod(vrfy, vrfy, rsa->n, ctx))
1039 if (BN_is_negative(vrfy))
1040 if (!BN_add(vrfy, vrfy, rsa->n))
1042 if (!BN_is_zero(vrfy)) {
1044 * 'I' and 'vrfy' aren't congruent mod n. Don't leak
1045 * miscalculated CRT output, just do a raw (slower) mod_exp and
1046 * return that instead.
1049 BIGNUM *d = BN_new();
1052 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
1054 if (!rsa->meth->bn_mod_exp(r0, I, d, rsa->n, ctx,
1055 rsa->_method_mod_n)) {
1059 /* We MUST free d before any further use of rsa->d */
1064 * It's unfortunate that we have to bn_correct_top(r0). What hopefully
1065 * saves the day is that correction is highly unlike, and private key
1066 * operations are customarily performed on blinded message. Which means
1067 * that attacker won't observe correlation with chosen plaintext.
1068 * Secondly, remaining code would still handle it in same computational
1069 * time and even conceal memory access pattern around corrected top.
1078 static int rsa_ossl_init(RSA *rsa)
1080 rsa->flags |= RSA_FLAG_CACHE_PUBLIC | RSA_FLAG_CACHE_PRIVATE;
1084 static int rsa_ossl_finish(RSA *rsa)
1088 RSA_PRIME_INFO *pinfo;
1090 for (i = 0; i < sk_RSA_PRIME_INFO_num(rsa->prime_infos); i++) {
1091 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
1092 BN_MONT_CTX_free(pinfo->m);
1096 BN_MONT_CTX_free(rsa->_method_mod_n);
1097 BN_MONT_CTX_free(rsa->_method_mod_p);
1098 BN_MONT_CTX_free(rsa->_method_mod_q);