scalar_4x64_impl.h
/***********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                              *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

#include "checkmem.h"
#include "int128.h"
#include "modinv64_impl.h"

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
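/* Together these four little-endian limbs encode the group order
 * n = FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141. */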

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)
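/* Since n is close to 2^256, its complement 2^256 - n is only slightly above
 * 2^128, so three limbs suffice (the third is just 1). Adding SECP256K1_N_C
 * is how the code below subtracts n without leaving 256-bit arithmetic. */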

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
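/* N_H is n >> 1, i.e. (n-1)/2; secp256k1_scalar_is_high compares against it
 * to detect scalars above half the group order. */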

SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
    r->d[0] = 0;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
    r->d[0] = v;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

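/* Return the `count` bits of `a` starting at bit `offset`; all requested
 * bits must lie within a single 64-bit limb. */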
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
    return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}

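/* Like secp256k1_scalar_get_bits, but the requested bits may cross a limb
 * boundary; count must be below 32 and offset + count at most 256. */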
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK(count < 32);
    VERIFY_CHECK(offset + count <= 256);
    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 6) + 1 < 4);
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
    }
}

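/* Return 1 if a >= n. The limbs are compared from the top without branching:
 * `no` latches as soon as some limb is strictly below the corresponding limb
 * of n, `yes` as soon as one is strictly above while still undecided. */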
SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
    no |= (a->d[2] < SECP256K1_N_2);
    yes |= (a->d[2] > SECP256K1_N_2) & ~no;
    no |= (a->d[1] < SECP256K1_N_1);
    yes |= (a->d[1] > SECP256K1_N_1) & ~no;
    yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
    return yes;
}

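/* Conditionally subtract n: when overflow is 1 this adds 2^256 - n and lets
 * the carry out of the top limb fall away, which equals subtracting n modulo
 * 2^256. overflow must be 0 or 1. */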
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    secp256k1_uint128 t;
    VERIFY_CHECK(overflow <= 1);
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_1);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_2);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t);
    return overflow;
}

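/* r = (a + b) mod n. The sum is accumulated with 128-bit carries; the return
 * value reports whether a reduction by n was required. */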
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, a->d[0]);
    secp256k1_u128_accum_u64(&t, b->d[0]);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[1]);
    secp256k1_u128_accum_u64(&t, b->d[1]);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[2]);
    secp256k1_u128_accum_u64(&t, b->d[2]);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[3]);
    secp256k1_u128_accum_u64(&t, b->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}

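/* Conditionally add 2^bit to r. When flag is 0, the first line below pushes
 * `bit` past 255, so every (bit >> 6) == i test fails and nothing is added.
 * No reduction is performed; the caller must ensure r + 2^bit cannot
 * overflow 256 bits. */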
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    secp256k1_uint128 t;
    VERIFY_CHECK(bit < 256);
    bit += ((uint32_t) flag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = secp256k1_u128_to_u64(&t);
#ifdef VERIFY
    VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
#endif
}

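/* Parse a 32-byte big-endian encoding into the four little-endian limbs and
 * reduce modulo n; *overflow, if non-NULL, reports whether the input was >= n. */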
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
    r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
    r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
    r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}

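/* Serialize a scalar back to 32 big-endian bytes (the inverse of
 * secp256k1_scalar_set_b32 for inputs below n). */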
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
    bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
    bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
    bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

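/* r = (n - a) mod n, branch-free: adding n + 1 to the complement
 * ~a = 2^256 - 1 - a gives 2^256 + (n - a), and the dropped carry leaves
 * n - a; `nonzero` masks the result to 0 when a itself is 0. */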
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, ~a->d[0]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[1]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[2]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[3]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
}

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

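/* Return 1 if a > n/2. Uses the same branch-free limbwise comparison scheme
 * as secp256k1_scalar_check_overflow, against N_H = n >> 1. */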
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_H_3);
    yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
    no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
    no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
    yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
    yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
    return yes;
}

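/* Negate r in place when flag is 1, leave it unchanged when flag is 0, in
 * constant time. Returns -1 when the negation was applied and 1 otherwise. */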
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    uint64_t mask = !flag - 1;
    uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
    secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1 & mask);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2 & mask);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
    return 2 * (mask == 0) - 1;
}

/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */

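/* The macros below maintain (c0,c1,c2), a little-endian accumulator of three
 * 64-bit limbs into which 64x64->128 bit partial products are summed;
 * extract() then shifts completed 64-bit limbs out of the bottom. */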
/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;            /* overflow is handled on the next line */ \
    th += (c0 < tl);     /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;            /* overflow is handled on the next line */ \
    c2 += (c1 < th);     /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;            /* overflow is handled on the next line */ \
    th += (c0 < tl);     /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;            /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);           /* overflow is handled on the next line */ \
    over = (c0 < (a)); \
    c1 += over;          /* overflow is handled on the next line */ \
    c2 += (c1 < over);   /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a);           /* overflow is handled on the next line */ \
    c1 += (c0 < (a));    /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}

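/* Reduction repeatedly substitutes 2^256 == SECP256K1_N_C (mod n): the top
 * four limbs of l are folded into the bottom four (512 -> 385 bits), the
 * excess is folded again (385 -> 258, then 258 -> 256 bits), and a final
 * conditional subtraction of n finishes the job. */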
static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;

    __asm__ __volatile__(
    /* Preload. */
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    /* Initialize r8,r9,r10 */
    "movq 0(%%rsi), %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += n0 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract m0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += l1 */
    "addq 8(%%rsi), %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += n1 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n0 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += l2 */
    "addq 16(%%rsi), %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n2 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n1 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n0 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract m2 */
    "movq %%r10, %q2\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += l3 */
    "addq 24(%%rsi), %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n3 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n2 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n1 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* extract m3 */
    "movq %%r8, %q3\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += n3 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n2 */
    "addq %%r13, %%r9\n"
    "adcq $0, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m4 */
    "movq %%r9, %q4\n"
    /* (r10,r8) += n3 */
    "addq %%r14, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m5 */
    "movq %%r10, %q5\n"
    /* extract m6 */
    "movq %%r8, %q6\n"
    : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");

    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q9, %%r11\n"
    "movq %q10, %%r12\n"
    "movq %q11, %%r13\n"
    /* Initialize (r8,r9,r10) */
    "movq %q5, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += m4 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract p0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += m1 */
    "addq %q6, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += m5 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += m4 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract p1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += m2 */
    "addq %q7, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m6 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m5 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m4 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p2 */
    "movq %%r10, %q2\n"
    /* (r8,r9) += m3 */
    "addq %q8, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += m6 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* (r8,r9) += m5 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p3 */
    "movq %%r8, %q3\n"
    /* (r9) += m6 */
    "addq %%r13, %%r9\n"
    /* extract p4 */
    "movq %%r9, %q4\n"
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");

    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q5, %%r10\n"
    /* (rax,rdx) = p4 * c0 */
    "movq %7, %%rax\n"
    "mulq %%r10\n"
    /* (rax,rdx) += p0 */
    "addq %q1, %%rax\n"
    "adcq $0, %%rdx\n"
    /* extract r0 */
    "movq %%rax, 0(%q6)\n"
    /* Move to (r8,r9) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p1 */
    "addq %q2, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += p4 * c1 */
    "movq %8, %%rax\n"
    "mulq %%r10\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* Extract r1 */
    "movq %%r8, 8(%q6)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r8) += p4 */
    "addq %%r10, %%r9\n"
    "adcq $0, %%r8\n"
    /* (r9,r8) += p2 */
    "addq %q3, %%r9\n"
    "adcq $0, %%r8\n"
    /* Extract r2 */
    "movq %%r9, 16(%q6)\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p3 */
    "addq %q4, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract r3 */
    "movq %%r8, 24(%q6)\n"
    /* Extract c */
    "movq %%r9, %q0\n"
    : "=g"(c)
    : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
    secp256k1_uint128 c128;
    uint64_t c, c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    uint64_t p0, p1, p2, p3;
    uint32_t p4;

    /* Reduce 512 bits into 385. */
    /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    sumadd_fast(l[1]);
    muladd(n1, SECP256K1_N_C_0);
    muladd(n0, SECP256K1_N_C_1);
    extract(m1);
    sumadd(l[2]);
    muladd(n2, SECP256K1_N_C_0);
    muladd(n1, SECP256K1_N_C_1);
    sumadd(n0);
    extract(m2);
    sumadd(l[3]);
    muladd(n3, SECP256K1_N_C_0);
    muladd(n2, SECP256K1_N_C_1);
    sumadd(n1);
    extract(m3);
    muladd(n3, SECP256K1_N_C_1);
    sumadd(n2);
    extract(m4);
    sumadd_fast(n3);
    extract_fast(m5);
    VERIFY_CHECK(c0 <= 1);
    m6 = c0;

    /* Reduce 385 bits into 258. */
    /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m4, SECP256K1_N_C_0);
    extract_fast(p0);
    sumadd_fast(m1);
    muladd(m5, SECP256K1_N_C_0);
    muladd(m4, SECP256K1_N_C_1);
    extract(p1);
    sumadd(m2);
    muladd(m6, SECP256K1_N_C_0);
    muladd(m5, SECP256K1_N_C_1);
    sumadd(m4);
    extract(p2);
    sumadd_fast(m3);
    muladd_fast(m6, SECP256K1_N_C_1);
    sumadd_fast(m5);
    extract_fast(p3);
    p4 = c0 + m6;
    VERIFY_CHECK(p4 <= 2);

    /* Reduce 258 bits into 256. */
    /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
    secp256k1_u128_from_u64(&c128, p0);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_0, p4);
    r->d[0] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p1);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_1, p4);
    r->d[1] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p2);
    secp256k1_u128_accum_u64(&c128, p4);
    r->d[2] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p3);
    r->d[3] = secp256k1_u128_to_u64(&c128);
    c = secp256k1_u128_hi_u64(&c128);
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}

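/* Compute the full 512-bit product of two scalars by schoolbook
 * multiplication of their 4x4 limb products (column by column in the C
 * path, using the accumulator macros above). */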
static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    const uint64_t *pb = b->d;
    __asm__ __volatile__(
    /* Preload */
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    /* (rax,rdx) = a0 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    /* Extract l0 */
    "movq %%rax, 0(%%rsi)\n"
    /* (r8,r9,r10) = (rdx) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a0 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a1 * b0 */
    "movq %%rbx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l1 */
    "movq %%r8, 8(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a0 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a1 * b1 */
    "movq %%rbx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a2 * b0 */
    "movq %%rcx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l2 */
    "movq %%r9, 16(%%rsi)\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += a0 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Preload a3 */
    "movq 24(%%rdi), %%r15\n"
    /* (r10,r8,r9) += a1 * b2 */
    "movq %%rbx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a2 * b1 */
    "movq %%rcx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a3 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract l3 */
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a1 * b3 */
    "movq %%rbx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a2 * b2 */
    "movq %%rcx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a3 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l4 */
    "movq %%r8, 32(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a2 * b3 */
    "movq %%rcx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a3 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l5 */
    "movq %%r9, 40(%%rsi)\n"
    /* (r10,r8) += a3 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    /* Extract l6 */
    "movq %%r10, 48(%%rsi)\n"
    /* Extract l7 */
    "movq %%r8, 56(%%rsi)\n"
    : "+d"(pb)
    : "S"(l), "D"(a->d)
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * b[0..3]. */
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l[0]);
    muladd(a->d[0], b->d[1]);
    muladd(a->d[1], b->d[0]);
    extract(l[1]);
    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l[2]);
    muladd(a->d[0], b->d[3]);
    muladd(a->d[1], b->d[2]);
    muladd(a->d[2], b->d[1]);
    muladd(a->d[3], b->d[0]);
    extract(l[3]);
    muladd(a->d[1], b->d[3]);
    muladd(a->d[2], b->d[2]);
    muladd(a->d[3], b->d[1]);
    extract(l[4]);
    muladd(a->d[2], b->d[3]);
    muladd(a->d[3], b->d[2]);
    extract(l[5]);
    muladd_fast(a->d[3], b->d[3]);
    extract_fast(l[6]);
    VERIFY_CHECK(c1 == 0);
    l[7] = c0;
#endif
}

#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef extract
#undef extract_fast

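/* Modular multiplication: full 512-bit product followed by reduction mod n. */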
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint64_t l[8];
    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);
}

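/* Shift r right by n bits (with 0 < n < 16) and return the n low bits that
 * were shifted out. */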
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
    r->d[3] = (r->d[3] >> n);
    return ret;
}

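/* Decompose k into 128-bit halves, k = r1 + r2 * 2^128. */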
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
    r1->d[0] = k->d[0];
    r1->d[1] = k->d[1];
    r1->d[2] = 0;
    r1->d[3] = 0;
    r2->d[0] = k->d[2];
    r2->d[1] = k->d[3];
    r2->d[2] = 0;
    r2->d[3] = 0;
}

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

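/* Compute round((a * b) / 2^shift) for shift >= 256. The cadd_bit call at
 * the end adds bit (shift - 1) of the 512-bit product, which rounds the
 * truncated quotient to nearest. */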
SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    VERIFY_CHECK(shift >= 256);
    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}

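/* Constant-time conditional move: r = flag ? a : r, implemented with masks
 * so the data flow does not depend on flag. */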
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint64_t mask0, mask1;
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
    mask0 = flag + ~((uint64_t)0);
    mask1 = ~mask0;
    r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
    r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
    r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
    r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
}

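/* Conversions between this 4x64 representation and the five signed 62-bit
 * limbs used by the modular-inversion code in modinv64_impl.h. */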
static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
    const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];

    /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
     * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
     */
    VERIFY_CHECK(a0 >> 62 == 0);
    VERIFY_CHECK(a1 >> 62 == 0);
    VERIFY_CHECK(a2 >> 62 == 0);
    VERIFY_CHECK(a3 >> 62 == 0);
    VERIFY_CHECK(a4 >> 8 == 0);

    r->d[0] = a0 | a1 << 62;
    r->d[1] = a1 >> 2 | a2 << 60;
    r->d[2] = a2 >> 4 | a3 << 58;
    r->d[3] = a3 >> 6 | a4 << 56;

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
}

static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
#endif

    r->v[0] = a0 & M62;
    r->v[1] = (a0 >> 62 | a1 << 2) & M62;
    r->v[2] = (a1 >> 60 | a2 << 4) & M62;
    r->v[3] = (a2 >> 58 | a3 << 6) & M62;
    r->v[4] = a3 >> 56;
}

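/* Parameters for secp256k1_modinv64: the order n in signed62 notation
 * (limbs may be negative, e.g. the -0x15 here), followed by what the
 * modinv64 interface expects to be the inverse of n modulo 2^62. */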
static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
    {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
    0x34F20099AA774EC1LL
};

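/* Compute the modular inverse of x (with 0 mapped to 0) via the
 * safegcd-based secp256k1_modinv64, which runs in constant time. */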
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}

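/* Variable-time counterpart of secp256k1_scalar_inverse. */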
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */