2 * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.
4 * Licensed under the OpenSSL license (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
10 #include "internal/cryptlib.h"
11 #include "internal/constant_time_locl.h"
18 # define alloca _alloca
20 #elif defined(__GNUC__)
22 # define alloca(s) __builtin_alloca((s))
31 #if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
32 # include "sparc_arch.h"
33 extern unsigned int OPENSSL_sparcv9cap_P[];
34 # define SPARC_T4_MONT
37 /* maximum precomputation table size for *variable* sliding windows */
40 /* this one works - simple but works */
/*
 * BN_exp(): plain (non-modular) exponentiation r = a^p, computed by
 * left-to-right square-and-multiply over the bits of p.  This path is
 * not constant-time, so operands flagged BN_FLG_CONSTTIME are rejected.
 */
41 int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
46 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
47 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0) {
48 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
49 BNerr(BN_F_BN_EXP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
/* Use a scratch result if r aliases an input, so a/p are not clobbered. */
54 rr = ((r == a) || (r == p)) ? BN_CTX_get(ctx) : r;
56 if (rr == NULL || v == NULL)
59 if (BN_copy(v, a) == NULL)
61 bits = BN_num_bits(p);
64 if (BN_copy(rr, a) == NULL)
/* v tracks a^(2^i); it is multiplied into rr whenever bit i of p is set. */
71 for (i = 1; i < bits; i++) {
72 if (!BN_sqr(v, v, ctx))
74 if (BN_is_bit_set(p, i)) {
75 if (!BN_mul(rr, rr, v, ctx))
/* Copy the scratch result back to the caller's r if one was used. */
79 if (r != rr && BN_copy(r, rr) == NULL)
/*
 * BN_mod_exp(): front-end dispatcher for modular exponentiation
 * r = a^p mod m.  Per the discussion below, it selects the Montgomery
 * implementation for odd moduli and falls back to the reciprocal-based
 * or simple implementation otherwise.
 */
89 int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
99 * For even modulus m = 2^k*m_odd, it might make sense to compute
100 * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
101 * exponentiation for the odd part), using appropriate exponent
102 * reductions, and combine the results using the CRT.
104 * For now, we use Montgomery only if the modulus is odd; otherwise,
105 * exponentiation using the reciprocal-based quick remaindering
108 * (Timing obtained with expspeed.c [computations a^p mod m
109 * where a, p, m are of the same length: 256, 512, 1024, 2048,
110 * 4096, 8192 bits], compared to the running time of the
111 * standard algorithm:
113 * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
114 * 55 .. 77 % [UltraSparc processor, but
115 * debug-solaris-sparcv8-gcc conf.]
117 * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
118 * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
120 * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
121 * at 2048 and more bits, but at 512 and 1024 bits, it was
122 * slower even than the standard algorithm!
124 * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
125 * should be obtained when the new Montgomery reduction code
126 * has been integrated into OpenSSL.)
130 #define MONT_EXP_WORD
135 # ifdef MONT_EXP_WORD
/*
 * Single-word, non-negative base with no constant-time flags set:
 * use the word-optimised Montgomery path.
 */
136 if (a->top == 1 && !a->neg
137 && (BN_get_flags(p, BN_FLG_CONSTTIME) == 0)
138 && (BN_get_flags(a, BN_FLG_CONSTTIME) == 0)
139 && (BN_get_flags(m, BN_FLG_CONSTTIME) == 0)) {
140 BN_ULONG A = a->d[0];
141 ret = BN_mod_exp_mont_word(r, A, p, m, ctx, NULL);
/* General Montgomery path (odd modulus — see comment above). */
144 ret = BN_mod_exp_mont(r, a, p, m, ctx, NULL);
/* Reciprocal-based path, used when Montgomery does not apply. */
149 ret = BN_mod_exp_recp(r, a, p, m, ctx);
/* Last-resort simple implementation. */
153 ret = BN_mod_exp_simple(r, a, p, m, ctx);
/*
 * BN_mod_exp_recp(): r = a^p mod m using sliding-window exponentiation
 * with reciprocal-based modular multiplication (BN_RECP_CTX).
 * Not constant-time; BN_FLG_CONSTTIME operands are rejected.
 */
161 int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
162 const BIGNUM *m, BN_CTX *ctx)
164 int i, j, bits, ret = 0, wstart, wend, window, wvalue;
167 /* Table of variables obtained from 'ctx' */
168 BIGNUM *val[TABLE_SIZE];
171 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
172 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
173 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
174 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
175 BNerr(BN_F_BN_MOD_EXP_RECP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
179 bits = BN_num_bits(p);
181 /* x**0 mod 1, or x**0 mod -1 is still zero. */
182 if (BN_abs_is_word(m, 1)) {
192 aa = BN_CTX_get(ctx);
193 val[0] = BN_CTX_get(ctx);
197 BN_RECP_CTX_init(&recp);
199 /* ignore sign of 'm' */
203 if (BN_RECP_CTX_set(&recp, aa, ctx) <= 0)
206 if (BN_RECP_CTX_set(&recp, m, ctx) <= 0)
/* val[0] = a mod m, non-negative. */
210 if (!BN_nnmod(val[0], a, m, ctx))
212 if (BN_is_zero(val[0])) {
218 window = BN_window_bits_for_exponent_size(bits);
/* Precompute odd powers val[i] = a^(2i+1) mod m for the window table. */
220 if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx))
222 j = 1 << (window - 1);
223 for (i = 1; i < j; i++) {
224 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
225 !BN_mod_mul_reciprocal(val[i], val[i - 1], aa, &recp, ctx))
230 start = 1; /* This is used to avoid multiplication etc
231 * when there is only the value '1' in the
233 wvalue = 0; /* The 'value' of the window */
234 wstart = bits - 1; /* The top bit of the window */
235 wend = 0; /* The bottom bit of the window */
/* Zero exponent bit: just square the accumulator and move down. */
241 if (BN_is_bit_set(p, wstart) == 0) {
243 if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
251 * We now have wstart on a 'set' bit, we now need to work out how big
252 * a window to do. To do this we need to scan forward until the last
253 * set bit before the end of the window
258 for (i = 1; i < window; i++) {
261 if (BN_is_bit_set(p, wstart - i)) {
262 wvalue <<= (i - wend);
268 /* wend is the size of the current window */
270 /* add the 'bytes above' */
272 for (i = 0; i < j; i++) {
273 if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
277 /* wvalue will be an odd number < 2^window */
278 if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx))
281 /* move the 'window' down further */
291 BN_RECP_CTX_free(&recp);
/*
 * BN_mod_exp_mont(): rr = a^p mod m using sliding-window exponentiation
 * with Montgomery multiplication (odd m required — see the even-modulus
 * error below).  If any operand carries BN_FLG_CONSTTIME the call is
 * forwarded to the constant-time implementation.
 */
296 int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
297 const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
299 int i, j, bits, ret = 0, wstart, wend, window, wvalue;
303 /* Table of variables obtained from 'ctx' */
304 BIGNUM *val[TABLE_SIZE];
305 BN_MONT_CTX *mont = NULL;
307 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
308 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
309 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
310 return BN_mod_exp_mont_consttime(rr, a, p, m, ctx, in_mont);
318 BNerr(BN_F_BN_MOD_EXP_MONT, BN_R_CALLED_WITH_EVEN_MODULUS);
321 bits = BN_num_bits(p);
323 /* x**0 mod 1, or x**0 mod -1 is still zero. */
324 if (BN_abs_is_word(m, 1)) {
336 val[0] = BN_CTX_get(ctx);
/* Allocate a Montgomery context if the caller did not supply one. */
341 * If this is not done, things will break in the montgomery part
347 if ((mont = BN_MONT_CTX_new()) == NULL)
349 if (!BN_MONT_CTX_set(mont, m, ctx))
/* Reduce 'a' into [0, m) only when needed. */
353 if (a->neg || BN_ucmp(a, m) >= 0) {
354 if (!BN_nnmod(val[0], a, m, ctx))
359 if (!bn_to_mont_fixed_top(val[0], aa, mont, ctx))
362 window = BN_window_bits_for_exponent_size(bits);
/* Precompute odd powers of 'a' (in Montgomery form) for the window table. */
364 if (!bn_mul_mont_fixed_top(d, val[0], val[0], mont, ctx))
366 j = 1 << (window - 1);
367 for (i = 1; i < j; i++) {
368 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
369 !bn_mul_mont_fixed_top(val[i], val[i - 1], d, mont, ctx))
374 start = 1; /* This is used to avoid multiplication etc
375 * when there is only the value '1' in the
377 wvalue = 0; /* The 'value' of the window */
378 wstart = bits - 1; /* The top bit of the window */
379 wend = 0; /* The bottom bit of the window */
381 #if 1 /* by Shay Gueron's suggestion */
/*
 * If the top bit of m is set, 2^(top*BN_BITS2) - m can be computed
 * cheaply word-by-word and is congruent to R mod m, so it can serve
 * as the Montgomery representation of 1.
 */
382 j = m->top; /* borrow j */
383 if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
384 if (bn_wexpand(r, j) == NULL)
386 /* 2^(top*BN_BITS2) - m */
387 r->d[0] = (0 - m->d[0]) & BN_MASK2;
388 for (i = 1; i < j; i++)
389 r->d[i] = (~m->d[i]) & BN_MASK2;
391 r->flags |= BN_FLG_FIXED_TOP;
394 if (!bn_to_mont_fixed_top(r, BN_value_one(), mont, ctx))
/* Zero exponent bit: just square the accumulator and move down. */
397 if (BN_is_bit_set(p, wstart) == 0) {
399 if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
408 * We now have wstart on a 'set' bit, we now need to work out how big
409 * a window to do. To do this we need to scan forward until the last
410 * set bit before the end of the window
415 for (i = 1; i < window; i++) {
418 if (BN_is_bit_set(p, wstart - i)) {
419 wvalue <<= (i - wend);
425 /* wend is the size of the current window */
427 /* add the 'bytes above' */
429 for (i = 0; i < j; i++) {
430 if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
434 /* wvalue will be an odd number < 2^window */
435 if (!bn_mul_mont_fixed_top(r, r, val[wvalue >> 1], mont, ctx))
438 /* move the 'window' down further */
446 * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
447 * removes padding [if any] and makes return value suitable for public
450 #if defined(SPARC_T4_MONT)
451 if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
452 j = mont->N.top; /* borrow j */
453 val[0]->d[0] = 1; /* borrow val[0] */
454 for (i = 1; i < j; i++)
457 if (!BN_mod_mul_montgomery(rr, r, val[0], mont, ctx))
461 if (!BN_from_montgomery(rr, r, mont, ctx))
/* Free the Montgomery context only if we allocated it ourselves. */
466 BN_MONT_CTX_free(mont);
/*
 * bn_get_bits(): return a word's worth of bits of |a| starting at bit
 * position |bitpos|, straddling a word boundary if necessary.  Out-of-range
 * positions yield zero bits.
 * NOTE(review): the visible code shifts by (BN_BITS2 - bitpos), which
 * presumes bitpos has been reduced modulo BN_BITS2 in the elided lines —
 * confirm against the full source.
 */
472 static BN_ULONG bn_get_bits(const BIGNUM *a, int bitpos)
477 wordpos = bitpos / BN_BITS2;
479 if (wordpos >= 0 && wordpos < a->top) {
480 ret = a->d[wordpos] & BN_MASK2;
/* Pull in the low bits of the next word when the read straddles words. */
483 if (++wordpos < a->top)
484 ret |= a->d[wordpos] << (BN_BITS2 - bitpos);
488 return ret & BN_MASK2;
492 * BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
493 * layout so that accessing any of these table values shows the same access
494 * pattern as far as cache lines are concerned. The following functions are
495 * used to transfer a BIGNUM from/to that table.
/*
 * Scatter BIGNUM |b| into column |idx| of the interleaved power table
 * |buf| (stride = 2^window words), so that later gathers touch the same
 * cache lines regardless of which table entry is read.
 */
498 static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top,
499 unsigned char *buf, int idx,
503 int width = 1 << window;
504 BN_ULONG *table = (BN_ULONG *)buf;
507 top = b->top; /* this works because 'buf' is explicitly
509 for (i = 0, j = idx; i < top; i++, j += width) {
/*
 * Gather entry |idx| of the interleaved power table into BIGNUM |b|.
 * Every table entry is read and masked with a constant-time equality
 * mask, so the memory access pattern is independent of |idx|.
 */
516 static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top,
517 unsigned char *buf, int idx,
521 int width = 1 << window;
523 * We declare table 'volatile' in order to discourage compiler
524 * from reordering loads from the table. Concern is that if
525 * reordered in specific manner loads might give away the
526 * information we are trying to conceal. Some would argue that
527 * compiler can reorder them anyway, but it can as well be
528 * argued that doing so would be violation of standard...
530 volatile BN_ULONG *table = (volatile BN_ULONG *)buf;
532 if (bn_wexpand(b, top) == NULL)
/* Generic path: mask-and-accumulate across the full table width. */
536 for (i = 0; i < top; i++, table += width) {
539 for (j = 0; j < width; j++) {
541 ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
/*
 * Optimised path (window >= 2): split idx into a 2-bit "quadrant"
 * selector i and a residual index, reading four strided sub-tables
 * per iteration with precomputed masks y0..y3.
 */
547 int xstride = 1 << (window - 2);
548 BN_ULONG y0, y1, y2, y3;
550 i = idx >> (window - 2); /* equivalent of idx / xstride */
551 idx &= xstride - 1; /* equivalent of idx % xstride */
553 y0 = (BN_ULONG)0 - (constant_time_eq_int(i,0)&1);
554 y1 = (BN_ULONG)0 - (constant_time_eq_int(i,1)&1);
555 y2 = (BN_ULONG)0 - (constant_time_eq_int(i,2)&1);
556 y3 = (BN_ULONG)0 - (constant_time_eq_int(i,3)&1);
558 for (i = 0; i < top; i++, table += width) {
561 for (j = 0; j < xstride; j++) {
562 acc |= ( (table[j + 0 * xstride] & y0) |
563 (table[j + 1 * xstride] & y1) |
564 (table[j + 2 * xstride] & y2) |
565 (table[j + 3 * xstride] & y3) )
566 & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
/* Table entries are zero-padded; keep the fixed-top invariant. */
574 b->flags |= BN_FLG_FIXED_TOP;
579 * Given a pointer value, compute the next address that is a cache line
582 #define MOD_EXP_CTIME_ALIGN(x_) \
583 ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
586 * This variant of BN_mod_exp_mont() uses fixed windows and the special
587 * precomputation memory layout to limit data-dependency to a minimum to
588 * protect secret exponents (cf. the hyper-threading timing attacks pointed
589 * out by Colin Percival,
590 * http://www.daemonology.net/hyperthreading-considered-harmful/)
592 int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
593 const BIGNUM *m, BN_CTX *ctx,
594 BN_MONT_CTX *in_mont)
596 int i, bits, ret = 0, window, wvalue, wmask, window0;
598 BN_MONT_CTX *mont = NULL;
601 unsigned char *powerbufFree = NULL;
603 unsigned char *powerbuf = NULL;
605 #if defined(SPARC_T4_MONT)
/* Montgomery exponentiation requires an odd modulus. */
614 BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME, BN_R_CALLED_WITH_EVEN_MODULUS);
621 * Use all bits stored in |p|, rather than |BN_num_bits|, so we do not leak
622 * whether the top bits are zero.
624 bits = p->top * BN_BITS2;
626 /* x**0 mod 1, or x**0 mod -1 is still zero. */
627 if (BN_abs_is_word(m, 1)) {
639 * Allocate a montgomery context if it was not supplied by the caller. If
640 * this is not done, things will break in the montgomery part.
645 if ((mont = BN_MONT_CTX_new()) == NULL)
647 if (!BN_MONT_CTX_set(mont, m, ctx))
654 * If the size of the operands allow it, perform the optimized
655 * RSAZ exponentiation. For further information see
656 * crypto/bn/rsaz_exp.c and accompanying assembly modules.
658 if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024)
659 && rsaz_avx2_eligible()) {
660 if (NULL == bn_wexpand(rr, 16))
662 RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d,
669 } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) {
670 if (NULL == bn_wexpand(rr, 8))
672 RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d);
682 /* Get the window size to use with size of p. */
683 window = BN_window_bits_for_ctime_exponent_size(bits);
684 #if defined(SPARC_T4_MONT)
685 if (window >= 5 && (top & 15) == 0 && top <= 64 &&
686 (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) ==
687 (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0]))
691 #if defined(OPENSSL_BN_ASM_MONT5)
693 window = 5; /* ~5% improvement for RSA2048 sign, and even
695 /* reserve space for mont->N.d[] copy */
696 powerbufLen += top * sizeof(mont->N.d[0]);
702 * Allocate a buffer large enough to hold all of the pre-computed powers
703 * of am, am itself and tmp.
705 numPowers = 1 << window;
706 powerbufLen += sizeof(m->d[0]) * (top * numPowers +
708 numPowers ? (2 * top) : numPowers));
/* Small buffers go on the stack (alloca); larger ones on the heap. */
710 if (powerbufLen < 3072)
712 alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
716 OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH))
720 powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
721 memset(powerbuf, 0, powerbufLen);
724 if (powerbufLen < 3072)
728 /* lay down tmp and am right after powers table */
729 tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers);
731 tmp.top = am.top = 0;
732 tmp.dmax = am.dmax = top;
733 tmp.neg = am.neg = 0;
734 tmp.flags = am.flags = BN_FLG_STATIC_DATA;
736 /* prepare a^0 in Montgomery domain */
737 #if 1 /* by Shay Gueron's suggestion */
/* If m's top bit is set, 2^(top*BN_BITS2) - m is congruent to R mod m. */
738 if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
739 /* 2^(top*BN_BITS2) - m */
740 tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
741 for (i = 1; i < top; i++)
742 tmp.d[i] = (~m->d[i]) & BN_MASK2;
746 if (!bn_to_mont_fixed_top(&tmp, BN_value_one(), mont, ctx))
749 /* prepare a^1 in Montgomery domain */
750 if (a->neg || BN_ucmp(a, m) >= 0) {
751 if (!BN_nnmod(&am, a, m, ctx))
753 if (!bn_to_mont_fixed_top(&am, &am, mont, ctx))
755 } else if (!bn_to_mont_fixed_top(&am, a, mont, ctx))
/* SPARC T4 assembly path: dedicated Montgomery mul/pow primitives. */
758 #if defined(SPARC_T4_MONT)
760 typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np,
761 const BN_ULONG *n0, const void *table,
762 int power, int bits);
763 int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np,
764 const BN_ULONG *n0, const void *table,
765 int power, int bits);
766 int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np,
767 const BN_ULONG *n0, const void *table,
768 int power, int bits);
769 int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np,
770 const BN_ULONG *n0, const void *table,
771 int power, int bits);
772 int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np,
773 const BN_ULONG *n0, const void *table,
774 int power, int bits);
775 static const bn_pwr5_mont_f pwr5_funcs[4] = {
776 bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16,
777 bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32
779 bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1];
781 typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap,
782 const void *bp, const BN_ULONG *np,
784 int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp,
785 const BN_ULONG *np, const BN_ULONG *n0);
786 int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap,
787 const void *bp, const BN_ULONG *np,
789 int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap,
790 const void *bp, const BN_ULONG *np,
792 int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap,
793 const void *bp, const BN_ULONG *np,
795 static const bn_mul_mont_f mul_funcs[4] = {
796 bn_mul_mont_t4_8, bn_mul_mont_t4_16,
797 bn_mul_mont_t4_24, bn_mul_mont_t4_32
799 bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1];
801 void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap,
802 const void *bp, const BN_ULONG *np,
803 const BN_ULONG *n0, int num);
804 void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap,
805 const void *bp, const BN_ULONG *np,
806 const BN_ULONG *n0, int num);
807 void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap,
808 const void *table, const BN_ULONG *np,
809 const BN_ULONG *n0, int num, int power);
810 void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num,
811 void *table, size_t power);
812 void bn_gather5_t4(BN_ULONG *out, size_t num,
813 void *table, size_t power);
814 void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num);
816 BN_ULONG *np = mont->N.d, *n0 = mont->n0;
817 int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less
821 * BN_to_montgomery can contaminate words above .top [in
822 * BN_DEBUG[_DEBUG] build]...
824 for (i = am.top; i < top; i++)
826 for (i = tmp.top; i < top; i++)
/* Build the 32-entry power table a^0..a^31 in scattered form. */
829 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0);
830 bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1);
831 if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) &&
832 !(*mul_worker) (tmp.d, am.d, am.d, np, n0))
833 bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top);
834 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2);
836 for (i = 3; i < 32; i++) {
837 /* Calculate a^i = a^(i-1) * a */
838 if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) &&
839 !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0))
840 bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top);
841 bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i);
844 /* switch to 64-bit domain */
845 np = alloca(top * sizeof(BN_ULONG));
847 bn_flip_t4(np, mont->N.d, top);
850 * The exponent may not have a whole number of fixed-size windows.
851 * To simplify the main loop, the initial window has between 1 and
852 * full-window-size bits such that what remains is always a whole
855 window0 = (bits - 1) % 5 + 1;
856 wmask = (1 << window0) - 1;
858 wvalue = bn_get_bits(p, bits) & wmask;
859 bn_gather5_t4(tmp.d, top, powerbuf, wvalue);
862 * Scan the exponent one window at a time starting from the most
869 wvalue = bn_get_bits(p, bits);
871 if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
873 /* retry once and fall back */
874 if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
878 wvalue >>= stride - 5;
/* Fallback: five squarings plus one gathered multiply per 5-bit window. */
880 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
881 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
882 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
883 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
884 bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
885 bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top,
889 bn_flip_t4(tmp.d, tmp.d, top);
891 /* back to 32-bit domain */
893 bn_correct_top(&tmp);
894 OPENSSL_cleanse(np, top * sizeof(BN_ULONG));
/* x86_64 MONT5 assembly path (window == 5). */
897 #if defined(OPENSSL_BN_ASM_MONT5)
898 if (window == 5 && top > 1) {
900 * This optimization uses ideas from http://eprint.iacr.org/2011/239,
901 * specifically optimization of cache-timing attack countermeasures
902 * and pre-computation optimization.
906 * Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
907 * 512-bit RSA is hardly relevant, we omit it to spare size...
909 void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap,
910 const void *table, const BN_ULONG *np,
911 const BN_ULONG *n0, int num, int power);
912 void bn_scatter5(const BN_ULONG *inp, size_t num,
913 void *table, size_t power);
914 void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power);
915 void bn_power5(BN_ULONG *rp, const BN_ULONG *ap,
916 const void *table, const BN_ULONG *np,
917 const BN_ULONG *n0, int num, int power);
918 int bn_get_bits5(const BN_ULONG *ap, int off);
919 int bn_from_montgomery(BN_ULONG *rp, const BN_ULONG *ap,
920 const BN_ULONG *not_used, const BN_ULONG *np,
921 const BN_ULONG *n0, int num);
923 BN_ULONG *n0 = mont->n0, *np;
926 * BN_to_montgomery can contaminate words above .top [in
927 * BN_DEBUG[_DEBUG] build]...
929 for (i = am.top; i < top; i++)
931 for (i = tmp.top; i < top; i++)
935 * copy mont->N.d[] to improve cache locality
937 for (np = am.d + top, i = 0; i < top; i++)
938 np[i] = mont->N.d[i];
/* Build the 32-entry scattered power table a^0..a^31. */
940 bn_scatter5(tmp.d, top, powerbuf, 0);
941 bn_scatter5(am.d, am.top, powerbuf, 1);
942 bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
943 bn_scatter5(tmp.d, top, powerbuf, 2);
946 for (i = 3; i < 32; i++) {
947 /* Calculate a^i = a^(i-1) * a */
948 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
949 bn_scatter5(tmp.d, top, powerbuf, i);
952 /* same as above, but uses squaring for 1/2 of operations */
953 for (i = 4; i < 32; i *= 2) {
954 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
955 bn_scatter5(tmp.d, top, powerbuf, i);
957 for (i = 3; i < 8; i += 2) {
959 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
960 bn_scatter5(tmp.d, top, powerbuf, i);
961 for (j = 2 * i; j < 32; j *= 2) {
962 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
963 bn_scatter5(tmp.d, top, powerbuf, j);
966 for (; i < 16; i += 2) {
967 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
968 bn_scatter5(tmp.d, top, powerbuf, i);
969 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
970 bn_scatter5(tmp.d, top, powerbuf, 2 * i);
972 for (; i < 32; i += 2) {
973 bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
974 bn_scatter5(tmp.d, top, powerbuf, i);
978 * The exponent may not have a whole number of fixed-size windows.
979 * To simplify the main loop, the initial window has between 1 and
980 * full-window-size bits such that what remains is always a whole
983 window0 = (bits - 1) % 5 + 1;
984 wmask = (1 << window0) - 1;
986 wvalue = bn_get_bits(p, bits) & wmask;
987 bn_gather5(tmp.d, top, powerbuf, wvalue);
990 * Scan the exponent one window at a time starting from the most
995 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
996 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
997 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
998 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
999 bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
1000 bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top,
1001 bn_get_bits5(p->d, bits -= 5));
/* bn_power5 fuses the five squarings and gathered multiply above. */
1005 bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top,
1006 bn_get_bits5(p->d, bits -= 5));
1010 ret = bn_from_montgomery(tmp.d, tmp.d, NULL, np, n0, top);
1012 bn_correct_top(&tmp);
1014 if (!BN_copy(rr, &tmp))
1016 goto err; /* non-zero ret means it's not error */
/* Generic constant-time path using the cache-oblivious prebuf table. */
1021 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window))
1023 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, window))
1027 * If the window size is greater than 1, then calculate
1028 * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even
1029 * powers could instead be computed as (a^(i/2))^2 to use the slight
1030 * performance advantage of sqr over mul).
1033 if (!bn_mul_mont_fixed_top(&tmp, &am, &am, mont, ctx))
1035 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2,
1038 for (i = 3; i < numPowers; i++) {
1039 /* Calculate a^i = a^(i-1) * a */
1040 if (!bn_mul_mont_fixed_top(&tmp, &am, &tmp, mont, ctx))
1042 if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i,
1049 * The exponent may not have a whole number of fixed-size windows.
1050 * To simplify the main loop, the initial window has between 1 and
1051 * full-window-size bits such that what remains is always a whole
1054 window0 = (bits - 1) % window + 1;
1055 wmask = (1 << window0) - 1;
1057 wvalue = bn_get_bits(p, bits) & wmask;
1058 if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp, top, powerbuf, wvalue,
1062 wmask = (1 << window) - 1;
1064 * Scan the exponent one window at a time starting from the most
1069 /* Square the result window-size times */
1070 for (i = 0; i < window; i++)
1071 if (!bn_mul_mont_fixed_top(&tmp, &tmp, &tmp, mont, ctx))
1075 * Get a window's worth of bits from the exponent
1076 * This avoids calling BN_is_bit_set for each bit, which
1077 * is not only slower but also makes each bit vulnerable to
1078 * EM (and likely other) side-channel attacks like One&Done
1079 * (for details see "One&Done: A Single-Decryption EM-Based
1080 * Attack on OpenSSL’s Constant-Time Blinded RSA" by M. Alam,
1081 * H. Khan, M. Dey, N. Sinha, R. Callan, A. Zajic, and
1082 * M. Prvulovic, in USENIX Security'18)
1085 wvalue = bn_get_bits(p, bits) & wmask;
1087 * Fetch the appropriate pre-computed value from the pre-buf
1089 if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue,
1093 /* Multiply the result into the intermediate result */
1094 if (!bn_mul_mont_fixed_top(&tmp, &tmp, &am, mont, ctx))
1100 * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
1101 * removes padding [if any] and makes return value suitable for public
1104 #if defined(SPARC_T4_MONT)
1105 if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
1106 am.d[0] = 1; /* borrow am */
1107 for (i = 1; i < top; i++)
1109 if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx))
1113 if (!BN_from_montgomery(rr, &tmp, mont, ctx))
/* Free the Montgomery context only if we allocated it ourselves,
 * and wipe the power table before releasing it. */
1117 if (in_mont == NULL)
1118 BN_MONT_CTX_free(mont);
1119 if (powerbuf != NULL) {
1120 OPENSSL_cleanse(powerbuf, powerbufLen);
1121 OPENSSL_free(powerbufFree);
/*
 * BN_mod_exp_mont_word(): rr = a^p mod m for a single-word base |a|.
 * The accumulator is kept as the product r*w with w a machine word for
 * as long as w does not overflow; only then is r switched into the
 * Montgomery domain.  Not constant-time; flagged operands are rejected.
 */
1127 int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
1128 const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
1130 BN_MONT_CTX *mont = NULL;
1131 int b, bits, ret = 0;
/* r := (r * w) mod m, swapping r and t to avoid an extra copy. */
1136 #define BN_MOD_MUL_WORD(r, w, m) \
1137 (BN_mul_word(r, (w)) && \
1138 (/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \
1139 (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1))))
1141 * BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is
1142 * probably more overhead than always using BN_mod (which uses BN_copy if
1143 * a similar test returns true).
1146 * We can use BN_mod and do not need BN_nnmod because our accumulator is
1147 * never negative (the result of BN_mod does not depend on the sign of
/* r := w converted to the Montgomery domain. */
1150 #define BN_TO_MONTGOMERY_WORD(r, w, mont) \
1151 (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx))
1153 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
1154 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
1155 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
1156 BNerr(BN_F_BN_MOD_EXP_MONT_WORD, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
1163 if (!BN_is_odd(m)) {
1164 BNerr(BN_F_BN_MOD_EXP_MONT_WORD, BN_R_CALLED_WITH_EVEN_MODULUS);
1168 a %= m->d[0]; /* make sure that 'a' is reduced */
1170 bits = BN_num_bits(p);
1172 /* x**0 mod 1, or x**0 mod -1 is still zero. */
1173 if (BN_abs_is_word(m, 1)) {
1188 r = BN_CTX_get(ctx);
1189 t = BN_CTX_get(ctx);
1193 if (in_mont != NULL)
1196 if ((mont = BN_MONT_CTX_new()) == NULL)
1198 if (!BN_MONT_CTX_set(mont, m, ctx))
1202 r_is_one = 1; /* except for Montgomery factor */
1206 /* The result is accumulated in the product r*w. */
1207 w = a; /* bit 'bits-1' of 'p' is always set */
1208 for (b = bits - 2; b >= 0; b--) {
1209 /* First, square r*w. */
/* w*w would overflow a word: fold w into the big-number part first. */
1211 if ((next_w / w) != w) { /* overflow */
1213 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1217 if (!BN_MOD_MUL_WORD(r, w, m))
1224 if (!BN_mod_mul_montgomery(r, r, r, mont, ctx))
1228 /* Second, multiply r*w by 'a' if exponent bit is set. */
1229 if (BN_is_bit_set(p, b)) {
1231 if ((next_w / a) != w) { /* overflow */
1233 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1237 if (!BN_MOD_MUL_WORD(r, w, m))
1246 /* Finally, set r:=r*w. */
1249 if (!BN_TO_MONTGOMERY_WORD(r, w, mont))
1253 if (!BN_MOD_MUL_WORD(r, w, m))
1258 if (r_is_one) { /* can happen only if a == 1 */
/* Convert the accumulator back out of the Montgomery domain. */
1262 if (!BN_from_montgomery(rr, r, mont, ctx))
/* Free the Montgomery context only if we allocated it ourselves. */
1267 if (in_mont == NULL)
1268 BN_MONT_CTX_free(mont);
1274 /* The old fallback, simple version :-) */
/*
 * BN_mod_exp_simple(): r = a^p mod m via sliding-window exponentiation
 * with plain BN_mod_mul.  Slowest path, kept as the reference fallback.
 * Not constant-time; flagged operands are rejected.
 */
1275 int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
1276 const BIGNUM *m, BN_CTX *ctx)
1278 int i, j, bits, ret = 0, wstart, wend, window, wvalue;
1281 /* Table of variables obtained from 'ctx' */
1282 BIGNUM *val[TABLE_SIZE];
1284 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
1285 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
1286 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
1287 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
1288 BNerr(BN_F_BN_MOD_EXP_SIMPLE, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
1292 bits = BN_num_bits(p);
1294 /* x**0 mod 1, or x**0 mod -1 is still zero. */
1295 if (BN_abs_is_word(m, 1)) {
1305 d = BN_CTX_get(ctx);
1306 val[0] = BN_CTX_get(ctx);
/* val[0] = a mod m, non-negative. */
1310 if (!BN_nnmod(val[0], a, m, ctx))
1312 if (BN_is_zero(val[0])) {
1318 window = BN_window_bits_for_exponent_size(bits);
/* Precompute odd powers val[i] = a^(2i+1) mod m for the window table. */
1320 if (!BN_mod_mul(d, val[0], val[0], m, ctx))
1322 j = 1 << (window - 1);
1323 for (i = 1; i < j; i++) {
1324 if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
1325 !BN_mod_mul(val[i], val[i - 1], d, m, ctx))
1330 start = 1; /* This is used to avoid multiplication etc
1331 * when there is only the value '1' in the
1333 wvalue = 0; /* The 'value' of the window */
1334 wstart = bits - 1; /* The top bit of the window */
1335 wend = 0; /* The bottom bit of the window */
/* Zero exponent bit: just square the accumulator and move down. */
1341 if (BN_is_bit_set(p, wstart) == 0) {
1343 if (!BN_mod_mul(r, r, r, m, ctx))
1351 * We now have wstart on a 'set' bit, we now need to work out how big
1352 * a window to do. To do this we need to scan forward until the last
1353 * set bit before the end of the window
1358 for (i = 1; i < window; i++) {
1361 if (BN_is_bit_set(p, wstart - i)) {
1362 wvalue <<= (i - wend);
1368 /* wend is the size of the current window */
1370 /* add the 'bytes above' */
1372 for (i = 0; i < j; i++) {
1373 if (!BN_mod_mul(r, r, r, m, ctx))
1377 /* wvalue will be an odd number < 2^window */
1378 if (!BN_mod_mul(r, r, val[wvalue >> 1], m, ctx))
1381 /* move the 'window' down further */