2 * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.
4 * Licensed under the OpenSSL license (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
11 * Details about Montgomery multiplication algorithms can be found at
12 * http://security.ece.orst.edu/publications.html, e.g.
13 * http://security.ece.orst.edu/koc/papers/j37acmon.pdf and
14 * sections 3.8 and 4.2 in http://security.ece.orst.edu/koc/papers/r01rsasw.pdf
17 #include "internal/cryptlib.h"
20 #define MONT_WORD /* use the faster word-based algorithm */
23 static int bn_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont);
/*
 * Public entry point for Montgomery multiplication:
 * computes r = a * b * R^-1 (mod N) for the modulus held in |mont|.
 * The visible code delegates to the internal fixed-top variant.
 * NOTE(review): the tail of this function is not visible in this view,
 * so any post-processing of |ret| (e.g. normalising the top) cannot be
 * confirmed here — verify against the full source.
 */
26 int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
27 BN_MONT_CTX *mont, BN_CTX *ctx)
29 int ret = bn_mul_mont_fixed_top(r, a, b, mont, ctx);
/*
 * Internal Montgomery multiplication that leaves the result with a
 * "fixed top" (BN_FLG_FIXED_TOP): the word count is not normalised, which
 * avoids leaking the numeric magnitude through r->top (timing channel).
 */
37 int bn_mul_mont_fixed_top(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
38 BN_MONT_CTX *mont, BN_CTX *ctx)
/* |num| is the modulus length in words; it bounds all operand sizes below. */
42 int num = mont->N.top;
/*
 * Fast path: when both operands are exactly |num| words, hand off to the
 * assembler kernel bn_mul_mont() which fuses multiply and reduction.
 */
44 #if defined(OPENSSL_BN_ASM_MONT) && defined(MONT_WORD)
45 if (num > 1 && a->top == num && b->top == num) {
46 if (bn_wexpand(r, num) == NULL)
48 if (bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {
49 r->neg = a->neg ^ b->neg;
51 r->flags |= BN_FLG_FIXED_TOP;
/* Generic path: product must fit in 2*num words for the reduction below. */
57 if ((a->top + b->top) > 2 * num)
61 tmp = BN_CTX_get(ctx);
/*
 * NOTE(review): the guard selecting between these two calls is not
 * visible here — presumably BN_sqr is taken when a == b; confirm
 * against the full source.
 */
67 if (!BN_sqr(tmp, a, ctx))
70 if (!BN_mul(tmp, a, b, ctx))
73 /* reduce from aRR to aR */
/* Word-based reduction when MONT_WORD is defined, bignum fallback otherwise. */
75 if (!bn_from_montgomery_word(r, tmp, mont))
78 if (!BN_from_montgomery(r, tmp, mont, ctx))
/*
 * Core word-based Montgomery reduction: given |r| < |n| * R (R = 2^(nl *
 * BN_BITS2)), computes ret = r * R^-1 (mod n).  Written to run in time
 * independent of the numeric values involved — do not "simplify" the
 * branch-free carry and selection logic below.
 */
88 static int bn_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
91 BN_ULONG *ap, *np, *rp, n0, v, carry;
101 max = (2 * nl); /* carry is stored separately */
102 if (bn_wexpand(r, max) == NULL)
109 /* clear the top words of T */
112 memset(&rp[r->top], 0, sizeof(*rp) * i);
115 r->flags |= BN_FLG_FIXED_TOP;
119 * Add multiples of |n| to |r| until R = 2^(nl * BN_BITS2) divides it. On
120 * input, we had |r| < |n| * R, so now |r| < 2 * |n| * R. Note that |r|
121 * includes |carry| which is stored separately.
/*
 * One iteration per modulus word: the multiplier (rp[0] * n0) mod 2^BN_BITS2
 * is chosen so that adding that multiple of |n| zeroes the lowest word of
 * |r|, which is then shifted out by advancing rp.
 */
123 for (carry = 0, i = 0; i < nl; i++, rp++) {
124 v = bn_mul_add_words(rp, np, nl, (rp[0] * n0) & BN_MASK2);
/* Fold the kernel's carry word into rp[nl]; the two lines after compute
 * the carry-out of this addition without any data-dependent branch:
 * carry becomes 1 iff the addition wrapped, 0 otherwise. */
125 v = (v + carry + rp[nl]) & BN_MASK2;
126 carry |= (v != rp[nl]);
127 carry &= (v <= rp[nl]);
131 if (bn_wexpand(ret, nl) == NULL)
134 ret->flags |= BN_FLG_FIXED_TOP;
140 * Shift |nl| words to divide by R. We have |ap| < 2 * |n|. Note that |ap|
141 * includes |carry| which is stored separately.
/* Trial subtraction: after this, carry is 0 (subtraction fit) or -1
 * (all-ones mask, subtraction underflowed). */
145 carry -= bn_sub_words(rp, ap, np, nl);
147 * |carry| is -1 if |ap| - |np| underflowed or zero if it did not. Note
148 * |carry| cannot be 1. That would imply the subtraction did not fit in
149 * |nl| words, and we know at most one subtraction is needed.
/* Constant-time select: keep the pre-subtraction value when the trial
 * subtraction underflowed (carry == all-ones), else keep the difference. */
151 for (i = 0; i < nl; i++) {
152 rp[i] = (carry & ap[i]) | (~carry & rp[i]);
158 #endif /* MONT_WORD */
/*
 * Public conversion out of Montgomery form: ret = a * R^-1 (mod N).
 * Two compile-time variants: the MONT_WORD build copies |a| into a
 * temporary and uses the word-based reduction; the bignum fallback
 * implements the textbook reduction with BN_* arithmetic.
 */
160 int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont,
/* MONT_WORD variant: |a| is const, so reduce a modifiable copy. */
168 if ((t = BN_CTX_get(ctx)) && BN_copy(t, a)) {
169 retn = bn_from_montgomery_word(ret, t, mont);
174 #else /* !MONT_WORD */
178 t1 = BN_CTX_get(ctx);
179 t2 = BN_CTX_get(ctx);
/* t1 = a mod R (keep only the low ri bits). */
185 BN_mask_bits(t1, mont->ri);
/* t2 = (t1 * Ni) mod R, the multiplier that makes a + t2*N divisible by R. */
187 if (!BN_mul(t2, t1, &mont->Ni, ctx))
189 BN_mask_bits(t2, mont->ri);
191 if (!BN_mul(t1, t2, &mont->N, ctx))
193 if (!BN_add(t2, a, t1))
/* Divide by R: the low ri bits are zero by construction. */
195 if (!BN_rshift(ret, t2, mont->ri))
/* At most one final subtraction brings the result into [0, N). */
198 if (BN_ucmp(ret, &(mont->N)) >= 0) {
199 if (!BN_usub(ret, ret, &(mont->N)))
206 #endif /* MONT_WORD */
/*
 * Convert |a| into Montgomery form (r = a * R mod N) by Montgomery-
 * multiplying it with the precomputed RR = R^2 mod N from |mont|.
 * Inherits the fixed-top (non-normalised r->top) property of the callee.
 */
210 int bn_to_mont_fixed_top(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont,
213 return bn_mul_mont_fixed_top(r, a, &(mont->RR), mont, ctx);
/*
 * Allocate and initialise a BN_MONT_CTX on the heap.  The MALLOCED flag
 * tells BN_MONT_CTX_free() that the struct itself must be freed.
 * Returns NULL (with BNerr raised) on allocation failure.
 */
216 BN_MONT_CTX *BN_MONT_CTX_new(void)
220 if ((ret = OPENSSL_malloc(sizeof(*ret))) == NULL) {
221 BNerr(BN_F_BN_MONT_CTX_NEW, ERR_R_MALLOC_FAILURE);
225 BN_MONT_CTX_init(ret);
226 ret->flags = BN_FLG_MALLOCED;
/*
 * Initialise a caller-provided (e.g. stack-allocated) BN_MONT_CTX to an
 * empty state.  Visible here: zeroing the cached n0 inverse words; the
 * BN_init calls for the embedded BIGNUMs are not shown in this view.
 */
230 void BN_MONT_CTX_init(BN_MONT_CTX *ctx)
236 ctx->n0[0] = ctx->n0[1] = 0;
/*
 * Release a BN_MONT_CTX.  BN_clear_free() zeroises the embedded BIGNUMs
 * before freeing (they may hold secret-derived values).  The struct
 * itself is freed only when it was heap-allocated by BN_MONT_CTX_new()
 * (BN_FLG_MALLOCED); the OPENSSL_free call is not visible in this view.
 * NOTE(review): a NULL guard presumably precedes this — confirm.
 */
240 void BN_MONT_CTX_free(BN_MONT_CTX *mont)
244 BN_clear_free(&mont->RR);
245 BN_clear_free(&mont->N);
246 BN_clear_free(&mont->Ni);
247 if (mont->flags & BN_FLG_MALLOCED)
/*
 * Precompute the Montgomery parameters for modulus |mod|:
 *   - N  = mod (copied, inheriting the CONSTTIME flag),
 *   - ri = bit length of R (word-rounded bit length of mod),
 *   - n0 = -N^-1 mod 2^w (one or two words, used by the word reduction),
 *   - RR = R^2 mod N (used by bn_to_mont_fixed_top for conversions).
 */
251 int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx)
260 if ((Ri = BN_CTX_get(ctx)) == NULL)
262 R = &(mont->RR); /* grab RR as a temp */
263 if (!BN_copy(&(mont->N), mod))
264 goto err; /* Set N */
/* Propagate constant-time handling to the stored modulus. */
265 if (BN_get_flags(mod, BN_FLG_CONSTTIME) != 0)
266 BN_set_flags(&(mont->N), BN_FLG_CONSTTIME);
279 if (BN_get_flags(mod, BN_FLG_CONSTTIME) != 0)
280 BN_set_flags(&tmod, BN_FLG_CONSTTIME);
/* ri = bit length of mod rounded up to a whole number of words. */
282 mont->ri = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2;
/*
 * Two-word n0 branch: some <=32-bit assembler kernels consume n0[1],
 * so compute the inverse modulo a double word (2^(2*BN_BITS2)).
 */
284 # if defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2<=32)
286 * Only certain BN_BITS2<=32 platforms actually make use of n0[1],
287 * and we could use the #else case (with a shorter R value) for the
288 * others. However, currently only the assembler files do know which
293 if (!(BN_set_bit(R, 2 * BN_BITS2)))
/* tmod = low two words of mod; top tracked via the truthiness of each word. */
297 if ((buf[0] = mod->d[0]))
299 if ((buf[1] = mod->top > 1 ? mod->d[1] : 0))
/* Ri = R^-1 mod tmod (inverse trivially 1 when tmod == 1). */
302 if (BN_is_one(&tmod))
304 else if ((BN_mod_inverse(Ri, R, &tmod, ctx)) == NULL)
/* Turn R^-1 into -N^-1: n0 = (R * Ri - 1) / tmod, low words kept. */
306 if (!BN_lshift(Ri, Ri, 2 * BN_BITS2))
308 if (!BN_is_zero(Ri)) {
309 if (!BN_sub_word(Ri, 1))
311 } else { /* if N mod word size == 1 */
313 if (bn_expand(Ri, (int)sizeof(BN_ULONG) * 2) == NULL)
315 /* Ri-- (mod double word size) */
321 if (!BN_div(Ri, NULL, Ri, &tmod, ctx))
324 * Ni = (R*Ri-1)/N, keep only couple of least significant words:
326 mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
327 mont->n0[1] = (Ri->top > 1) ? Ri->d[1] : 0;
/* One-word n0 branch: same derivation modulo a single word (2^BN_BITS2). */
330 if (!(BN_set_bit(R, BN_BITS2)))
333 buf[0] = mod->d[0]; /* tmod = N mod word size */
335 tmod.top = buf[0] != 0 ? 1 : 0;
336 /* Ri = R^-1 mod N */
337 if (BN_is_one(&tmod))
339 else if ((BN_mod_inverse(Ri, R, &tmod, ctx)) == NULL)
341 if (!BN_lshift(Ri, Ri, BN_BITS2))
343 if (!BN_is_zero(Ri)) {
344 if (!BN_sub_word(Ri, 1))
346 } else { /* if N mod word size == 1 */
/* Ri was 0, so Ri - 1 wraps to the all-ones word. */
348 if (!BN_set_word(Ri, BN_MASK2))
349 goto err; /* Ri-- (mod word size) */
351 if (!BN_div(Ri, NULL, Ri, &tmod, ctx))
354 * Ni = (R*Ri-1)/N, keep only least significant word:
356 mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
/* Bignum fallback: no word inverse; store full Ni = (R*Ri - 1)/N instead. */
360 #else /* !MONT_WORD */
361 { /* bignum version */
362 mont->ri = BN_num_bits(&mont->N);
364 if (!BN_set_bit(R, mont->ri))
365 goto err; /* R = 2^ri */
366 /* Ri = R^-1 mod N */
367 if ((BN_mod_inverse(Ri, R, &mont->N, ctx)) == NULL)
369 if (!BN_lshift(Ri, Ri, mont->ri))
371 if (!BN_sub_word(Ri, 1))
376 if (!BN_div(&(mont->Ni), NULL, Ri, &mont->N, ctx))
381 /* setup RR for conversions */
382 BN_zero(&(mont->RR));
383 if (!BN_set_bit(&(mont->RR), mont->ri * 2))
385 if (!BN_mod(&(mont->RR), &(mont->RR), &(mont->N), ctx))
/* Zero-pad RR up to N.top words and mark it fixed-top, so later
 * Montgomery multiplies see a full-width, magnitude-independent operand. */
388 for (i = mont->RR.top, ret = mont->N.top; i < ret; i++)
391 mont->RR.flags |= BN_FLG_FIXED_TOP;
/*
 * Deep-copy the Montgomery parameters from |from| into |to|: the three
 * embedded BIGNUMs (RR, N, Ni) plus the cached n0 inverse words.
 * NOTE(review): the copy of |ri| and the return statement are not
 * visible in this view — confirm against the full source.
 */
399 BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, BN_MONT_CTX *from)
404 if (!BN_copy(&(to->RR), &(from->RR)))
406 if (!BN_copy(&(to->N), &(from->N)))
408 if (!BN_copy(&(to->Ni), &(from->Ni)))
411 to->n0[0] = from->n0[0];
412 to->n0[1] = from->n0[1];
416 BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_RWLOCK *lock,
417 const BIGNUM *mod, BN_CTX *ctx)
421 CRYPTO_THREAD_read_lock(lock);
423 CRYPTO_THREAD_unlock(lock);
428 * We don't want to serialise globally while doing our lazy-init math in
429 * BN_MONT_CTX_set. That punishes threads that are doing independent
430 * things. Instead, punish the case where more than one thread tries to
431 * lazy-init the same 'pmont', by having each do the lazy-init math work
432 * independently and only use the one from the thread that wins the race
433 * (the losers throw away the work they've done).
435 ret = BN_MONT_CTX_new();
438 if (!BN_MONT_CTX_set(ret, mod, ctx)) {
439 BN_MONT_CTX_free(ret);
443 /* The locked compare-and-set, after the local work is done. */
444 CRYPTO_THREAD_write_lock(lock);
446 BN_MONT_CTX_free(ret);
450 CRYPTO_THREAD_unlock(lock);