|
40 | 40 | secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ |
41 | 41 | } while(0) |
42 | 42 |
|
43 | | - |
44 | 43 | /** Convert a number to WNAF notation. |
45 | 44 | * The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val. |
46 | 45 | * It has the following guarantees: |
|
56 | 55 | */ |
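To make the formula above concrete, here is a small standalone sketch (not library code; the digit values and the helper name `wnaf_reconstruct` are chosen purely for illustration) that evaluates sum(2^{wi} * wnaf[i]) - skew for a toy digit array:

```c
#include <stdio.h>

/* Illustrative only: rebuild a small integer from width-w digits plus skew,
 * mirroring sum(2^{wi} * wnaf[i], i=0..n-1) - skew from the comment above. */
static long wnaf_reconstruct(const int *wnaf, int n, int w, int skew) {
    long acc = 0;
    int i;
    for (i = n - 1; i >= 0; i--) {
        acc = (acc << w) + wnaf[i]; /* acc = acc * 2^w + wnaf[i] */
    }
    return acc - skew;
}

int main(void) {
    /* w = 4: 7*2^0 + (-3)*2^4 + 1*2^8 = 215; skew = 1 encodes the even 214. */
    int digits[3] = {7, -3, 1};
    printf("%ld\n", wnaf_reconstruct(digits, 3, 4, 1)); /* prints 214 */
    return 0;
}
```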
57 | 56 | static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) { |
58 | 57 | int global_sign; |
59 | | - int skew = 0; |
| 58 | + int skew; |
60 | 59 | int word = 0; |
61 | 60 |
|
62 | 61 | /* 1 2 3 */ |
63 | 62 | int u_last; |
64 | 63 | int u; |
65 | 64 |
|
66 | 65 | int flip; |
67 | | - int bit; |
68 | | - secp256k1_scalar s; |
69 | | - int not_neg_one; |
| 66 | + secp256k1_scalar s = *scalar; |
70 | 67 |
|
71 | 68 | VERIFY_CHECK(w > 0); |
72 | 69 | VERIFY_CHECK(size > 0); |
73 | 70 |
|
74 | 71 | /* Note that we cannot handle even numbers by negating them to be odd, as is |
75 | 72 | * done in other implementations, since if our scalars were specified to have |
76 | 73 | * width < 256 for performance reasons, their negations would have width 256 |
77 | | - * and we'd lose any performance benefit. Instead, we use a technique from |
78 | | - * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
79 | | - * or 2 (for odd) to the number we are encoding, returning a skew value indicating |
| 74 | + * and we'd lose any performance benefit. Instead, we use a variation of a |
| 75 | + * technique from Section 4.2 of the Okeya/Takagi paper, which is to add 1 to the
| 76 | + * number we are encoding when it is even, returning a skew value indicating |
80 | 77 | * this, and having the caller compensate after doing the multiplication. |
81 | 78 | * |
82 | 79 | * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in |
83 | 80 | * particular, to ensure that the outputs from the endomorphism-split fit into |
84 | | - * 128 bits). If we negate, the parity of our number flips, inverting which of |
85 | | - * {1, 2} we want to add to the scalar when ensuring that it's odd. Further |
86 | | - * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and |
87 | | - * we need to special-case it in this logic. */ |
88 | | - flip = secp256k1_scalar_is_high(scalar); |
89 | | - /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ |
90 | | - bit = flip ^ !secp256k1_scalar_is_even(scalar); |
91 | | - /* We check for negative one, since adding 2 to it will cause an overflow */ |
92 | | - secp256k1_scalar_negate(&s, scalar); |
93 | | - not_neg_one = !secp256k1_scalar_is_one(&s); |
94 | | - s = *scalar; |
95 | | - secp256k1_scalar_cadd_bit(&s, bit, not_neg_one); |
96 | | - /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects |
97 | | - * that we added two to it and flipped it. In fact for -1 these operations are |
98 | | - * identical. We only flipped, but since skewing is required (in the sense that |
99 | | - * the skew must be 1 or 2, never zero) and flipping is not, we need to change |
100 | | - * our flags to claim that we only skewed. */ |
| 81 | + * 128 bits). If we negate, the parity of our number flips, affecting whether |
| 82 | + * we want to add to the scalar to ensure that it's odd. */ |
| 83 | + flip = secp256k1_scalar_is_high(&s); |
| 84 | + skew = flip ^ secp256k1_scalar_is_even(&s); |
| 85 | + secp256k1_scalar_cadd_bit(&s, 0, skew); |
101 | 86 | global_sign = secp256k1_scalar_cond_negate(&s, flip); |
102 | | - global_sign *= not_neg_one * 2 - 1; |
103 | | - skew = 1 << bit; |
104 | 87 |
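The interaction of `flip` and `skew` is easiest to see in a toy model over plain integers modulo a small odd N (the constant `N` and the function `make_odd` are illustrative assumptions, not library code):

```c
#include <assert.h>

#define N 1019 /* stand-in for the odd group order; illustrative only */

/* Plain-integer mirror of the flip/skew logic above, for 0 <= x < N. */
static int make_odd(int x, int *flip, int *skew) {
    *flip = x > N / 2;              /* model of secp256k1_scalar_is_high */
    *skew = *flip ^ ((x & 1) == 0); /* negation mod odd N flips parity */
    x += *skew;                     /* model of secp256k1_scalar_cadd_bit */
    if (*flip) x = N - x;           /* model of secp256k1_scalar_cond_negate */
    assert((x & 1) == 1);           /* the working value is always odd */
    return x;
}
```

The value actually encoded is ±(scalar + skew), so the caller undoes the skew after the multiplication, which is what the second hunk below does.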
|
105 | 88 | /* 4 */ |
106 | 89 | u_last = secp256k1_scalar_shr_int(&s, w); |
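Assuming secp256k1_scalar_shr_int's contract is "shift right by w bits and return the bits shifted out", the digit-extraction step behaves like this plain-integer sketch:

```c
/* Assumed contract of secp256k1_scalar_shr_int, modeled on a plain integer:
 * shift *s right by w bits and return the w low bits that were shifted out. */
static int shr_int(unsigned long *s, int w) {
    int ret = (int)(*s & ((1UL << w) - 1));
    *s >>= w;
    return ret;
}
```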
@@ -220,19 +203,17 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons |
220 | 203 | /* Correct for wNAF skew */ |
221 | 204 | secp256k1_gej tmp; |
222 | 205 | secp256k1_ge a_1; |
223 | | - |
224 | 206 | secp256k1_ge_neg(&a_1, a); |
225 | | - secp256k1_gej_add_ge(r, r, &a_1); |
| 207 | + |
226 | 208 | secp256k1_gej_add_ge(&tmp, r, &a_1); |
227 | | - secp256k1_gej_cmov(r, &tmp, skew_1 == 2); |
| 209 | + secp256k1_gej_cmov(r, &tmp, skew_1); |
228 | 210 |
|
229 | 211 | if (size > 128) { |
230 | 212 | secp256k1_ge a_lam; |
231 | 213 | secp256k1_ge_mul_lambda(&a_lam, &a_1); |
232 | 214 |
|
233 | | - secp256k1_gej_add_ge(r, r, &a_lam); |
234 | 215 | secp256k1_gej_add_ge(&tmp, r, &a_lam); |
235 | | - secp256k1_gej_cmov(r, &tmp, skew_lam == 2); |
| 216 | + secp256k1_gej_cmov(r, &tmp, skew_lam); |
236 | 217 | } |
237 | 218 | } |
238 | 219 | } |
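The skew corrections above go through secp256k1_gej_cmov rather than an `if`, so whether the extra addition is kept never shows up in the branch or memory-access pattern. A limb-level sketch of the masking idea (`cmov_u64` is a hypothetical helper, not the library's field/group cmov):

```c
#include <stdint.h>

/* Branch-free conditional move: keep *r if flag == 0, take a if flag != 0. */
static void cmov_u64(uint64_t *r, uint64_t a, int flag) {
    uint64_t mask = -(uint64_t)(flag != 0); /* all-ones or all-zeros */
    *r = (*r & ~mask) | (a & mask);
}
```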
|