/***********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                              *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

#include "checkmem.h"
#include "int128.h"
#include "modinv64_impl.h"
#include "util.h"

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
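
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): by construction the N_C limbs encode
 * 2^256 - n, so adding them limb-wise to the order n must give exactly
 * 2^256, i.e. all 256 low bits cancel and only the top carry survives. */
SECP256K1_INLINE static void secp256k1_scalar_example_order_complement(void) {
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, SECP256K1_N_0);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_C_0);
    VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_C_1);
    VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_C_2);
    VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
    /* n + (2^256 - n): the low limb is 0 and the carry out is exactly 1. */
    VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);
    VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 1);
}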

SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
    r->d[0] = v;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;

    SECP256K1_SCALAR_VERIFY(r);
}

SECP256K1_INLINE static uint32_t secp256k1_scalar_get_bits_limb32(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    SECP256K1_SCALAR_VERIFY(a);
    VERIFY_CHECK(count > 0 && count <= 32);
    VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);

    return (a->d[offset >> 6] >> (offset & 0x3F)) & (0xFFFFFFFF >> (32 - count));
}

SECP256K1_INLINE static uint32_t secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    SECP256K1_SCALAR_VERIFY(a);
    VERIFY_CHECK(count > 0 && count <= 32);
    VERIFY_CHECK(offset + count <= 256);

    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits_limb32(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 6) + 1 < 4);
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & (0xFFFFFFFF >> (32 - count));
    }
}
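
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): reading a bit window that straddles a limb
 * boundary. For a = 2^63, the window [60,68) spans limbs 0 and 1; bit 63
 * lands at position 3 inside the window, so the extracted value is 1 << 3. */
SECP256K1_INLINE static void secp256k1_scalar_example_get_bits(void) {
    secp256k1_scalar a;
    uint32_t w;
    a.d[0] = (uint64_t)1 << 63;
    a.d[1] = 0;
    a.d[2] = 0;
    a.d[3] = 0;
    w = secp256k1_scalar_get_bits_var(&a, 60, 8);
    VERIFY_CHECK(w == 8);
    (void)w;
}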

SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
    no |= (a->d[2] < SECP256K1_N_2);
    yes |= (a->d[2] > SECP256K1_N_2) & ~no;
    no |= (a->d[1] < SECP256K1_N_1);
    yes |= (a->d[1] > SECP256K1_N_1) & ~no;
    yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
    return yes;
}

SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    secp256k1_uint128 t;
    VERIFY_CHECK(overflow <= 1);

    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_1);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_2);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t);

    SECP256K1_SCALAR_VERIFY(r);
    return overflow;
}

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    secp256k1_uint128 t;
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    secp256k1_u128_from_u64(&t, a->d[0]);
    secp256k1_u128_accum_u64(&t, b->d[0]);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[1]);
    secp256k1_u128_accum_u64(&t, b->d[1]);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[2]);
    secp256k1_u128_accum_u64(&t, b->d[2]);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[3]);
    secp256k1_u128_accum_u64(&t, b->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t);
    overflow = secp256k1_u128_hi_u64(&t) + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);

    SECP256K1_SCALAR_VERIFY(r);
    return overflow;
}
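
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): adding 2 to n-1 wraps past the group order,
 * so secp256k1_scalar_add returns 1 and leaves (n+1) mod n = 1 in r. */
SECP256K1_INLINE static void secp256k1_scalar_example_add_wrap(void) {
    secp256k1_scalar a, b, r;
    int overflow;
    a.d[0] = SECP256K1_N_0 - 1; /* a = n - 1, the largest valid scalar */
    a.d[1] = SECP256K1_N_1;
    a.d[2] = SECP256K1_N_2;
    a.d[3] = SECP256K1_N_3;
    secp256k1_scalar_set_int(&b, 2);
    overflow = secp256k1_scalar_add(&r, &a, &b);
    VERIFY_CHECK(overflow == 1);
    VERIFY_CHECK(r.d[0] == 1 && (r.d[1] | r.d[2] | r.d[3]) == 0);
    (void)overflow;
}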

static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    secp256k1_uint128 t;
    volatile int vflag = flag;
    VERIFY_CHECK(flag == 0 || flag == 1);
    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(bit < 256);

    bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = secp256k1_u128_to_u64(&t);

    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
}
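
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): with flag = 0 the bit index is pushed out of
 * range and nothing is added; with flag = 1 the chosen power of two is added
 * in constant time. */
SECP256K1_INLINE static void secp256k1_scalar_example_cadd_bit(void) {
    secp256k1_scalar r;
    secp256k1_scalar_set_int(&r, 0);
    secp256k1_scalar_cadd_bit(&r, 5, 0); /* no-op */
    VERIFY_CHECK(r.d[0] == 0);
    secp256k1_scalar_cadd_bit(&r, 5, 1); /* r += 2^5 */
    VERIFY_CHECK(r.d[0] == 32 && (r.d[1] | r.d[2] | r.d[3]) == 0);
}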

static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = secp256k1_read_be64(&b32[24]);
    r->d[1] = secp256k1_read_be64(&b32[16]);
    r->d[2] = secp256k1_read_be64(&b32[8]);
    r->d[3] = secp256k1_read_be64(&b32[0]);
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    SECP256K1_SCALAR_VERIFY(a);

    secp256k1_write_be64(&bin[0], a->d[3]);
    secp256k1_write_be64(&bin[8], a->d[2]);
    secp256k1_write_be64(&bin[16], a->d[1]);
    secp256k1_write_be64(&bin[24], a->d[0]);
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    secp256k1_uint128 t;
    SECP256K1_SCALAR_VERIFY(a);

    secp256k1_u128_from_u64(&t, ~a->d[0]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[1]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[2]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[3]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
    /* Writing `/` for field division and `//` for integer division, we compute
     *
     *   a/2 = (a - (a&1))/2 + (a&1)/2
     *       = (a >> 1) + (a&1 ?    1/2 : 0)
     *       = (a >> 1) + (a&1 ? n//2+1 : 0),
     *
     * where n is the group order and in the last equality we have used 1/2 = n//2+1 (mod n).
     * For n//2, we have the constants SECP256K1_N_H_0, ...
     *
     * This sum does not overflow. The most extreme case is a = -2, the largest odd scalar. Here:
     * - the left summand is:  a >> 1 = (a - a&1)/2 = (n-2-1)//2 = (n-3)//2
     * - the right summand is: a&1 ? n//2+1 : 0 = n//2+1 = (n-1)//2 + 2//2 = (n+1)//2
     * Together they sum to (n-3)//2 + (n+1)//2 = (2n-2)//2 = n - 1, which is less than n.
     */
    uint64_t mask = -(uint64_t)(a->d[0] & 1U);
    secp256k1_uint128 t;
    SECP256K1_SCALAR_VERIFY(a);

    secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63));
    secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, (a->d[1] >> 1) | (a->d[2] << 63));
    secp256k1_u128_accum_u64(&t, SECP256K1_N_H_1 & mask);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, (a->d[2] >> 1) | (a->d[3] << 63));
    secp256k1_u128_accum_u64(&t, SECP256K1_N_H_2 & mask);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    r->d[3] = secp256k1_u128_to_u64(&t) + (a->d[3] >> 1) + (SECP256K1_N_H_3 & mask);
#ifdef VERIFY
    /* The line above only computed the bottom 64 bits of r->d[3]; redo the computation
     * in full 128 bits to make sure the top 64 bits are indeed zero. */
    secp256k1_u128_accum_u64(&t, a->d[3] >> 1);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_H_3 & mask);
    secp256k1_u128_rshift(&t, 64);
    VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);

    SECP256K1_SCALAR_VERIFY(r);
#endif
}
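
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): halving is exact modulo the odd order n, so
 * doubling the result with secp256k1_scalar_add must reproduce the input for
 * any valid scalar a. */
SECP256K1_INLINE static void secp256k1_scalar_example_half_roundtrip(const secp256k1_scalar *a) {
    secp256k1_scalar h, d;
    secp256k1_scalar_half(&h, a);
    secp256k1_scalar_add(&d, &h, &h); /* d = 2*(a/2) = a (mod n) */
    VERIFY_CHECK(((d.d[0] ^ a->d[0]) | (d.d[1] ^ a->d[1]) | (d.d[2] ^ a->d[2]) | (d.d[3] ^ a->d[3])) == 0);
}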

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    SECP256K1_SCALAR_VERIFY(a);

    no |= (a->d[3] < SECP256K1_N_H_3);
    yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
    no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
    no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
    yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
    yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
    return yes;
}

static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If we are flag = 0, mask = 00...00 and this is a no-op;
     * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */
    volatile int vflag = flag;
    uint64_t mask = -vflag;
    uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
    secp256k1_uint128 t;
    VERIFY_CHECK(flag == 0 || flag == 1);
    SECP256K1_SCALAR_VERIFY(r);

    secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
    secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1 & mask);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2 & mask);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;

    SECP256K1_SCALAR_VERIFY(r);
    return 2 * (mask == 0) - 1;
}

/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* overflow is handled on the next line */ \
    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);                  /* overflow is handled on the next line */ \
    over = (c0 < (a)); \
    c1 += over;                 /* overflow is handled on the next line */ \
    c2 += (c1 < over);          /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a);                  /* overflow is handled on the next line */ \
    c1 += (c0 < (a));           /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}
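
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): the macros above maintain a little-endian
 * accumulator (c0,c1,c2) that each extract shifts down by one 64-bit limb.
 * Accumulating (2^64-1)^2 twice gives 2*(2^64-1)^2 = 2^129 - 2^66 + 2, whose
 * limbs are (2, 0xFFFFFFFFFFFFFFFC, 1). */
SECP256K1_INLINE static void secp256k1_scalar_example_accumulator(void) {
    uint64_t c0 = 0, c1 = 0, c2 = 0;
    uint64_t n0, n1, n2;
    const uint64_t x = ~(uint64_t)0; /* 2^64 - 1 */
    muladd_fast(x, x);               /* (c0,c1)    += x*x */
    muladd(x, x);                    /* (c0,c1,c2) += x*x */
    extract(n0);
    extract_fast(n1);
    n2 = c0;
    VERIFY_CHECK(n0 == 2);
    VERIFY_CHECK(n1 == 0xFFFFFFFFFFFFFFFCULL);
    VERIFY_CHECK(n2 == 1);
    (void)n0; (void)n1; (void)n2;
}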

static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;

    __asm__ __volatile__(
    /* Preload. */
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    /* Initialize r8,r9,r10 */
    "movq 0(%%rsi), %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += n0 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract m0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += l1 */
    "addq 8(%%rsi), %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += n1 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n0 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += l2 */
    "addq 16(%%rsi), %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n2 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n1 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n0 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract m2 */
    "movq %%r10, %q2\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += l3 */
    "addq 24(%%rsi), %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n3 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n2 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n1 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* extract m3 */
    "movq %%r8, %q3\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += n3 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n2 */
    "addq %%r13, %%r9\n"
    "adcq $0, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m4 */
    "movq %%r9, %q4\n"
    /* (r10,r8) += n3 */
    "addq %%r14, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m5 */
    "movq %%r10, %q5\n"
    /* extract m6 */
    "movq %%r8, %q6\n"
    : "=&g"(m0), "=&g"(m1), "=&g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");

    SECP256K1_CHECKMEM_MSAN_DEFINE(&m0, sizeof(m0));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m1, sizeof(m1));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m2, sizeof(m2));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m3, sizeof(m3));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m4, sizeof(m4));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m5, sizeof(m5));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m6, sizeof(m6));

    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q9, %%r11\n"
    "movq %q10, %%r12\n"
    "movq %q11, %%r13\n"
    /* Initialize (r8,r9,r10) */
    "movq %q5, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += m4 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract p0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += m1 */
    "addq %q6, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += m5 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += m4 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract p1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += m2 */
    "addq %q7, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m6 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m5 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m4 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p2 */
    "movq %%r10, %q2\n"
    /* (r8,r9) += m3 */
    "addq %q8, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += m6 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* (r8,r9) += m5 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p3 */
    "movq %%r8, %q3\n"
    /* (r9) += m6 */
    "addq %%r13, %%r9\n"
    /* extract p4 */
    "movq %%r9, %q4\n"
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");

    SECP256K1_CHECKMEM_MSAN_DEFINE(&p0, sizeof(p0));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p1, sizeof(p1));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p2, sizeof(p2));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p3, sizeof(p3));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p4, sizeof(p4));

    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q5, %%r10\n"
    /* (rax,rdx) = p4 * c0 */
    "movq %7, %%rax\n"
    "mulq %%r10\n"
    /* (rax,rdx) += p0 */
    "addq %q1, %%rax\n"
    "adcq $0, %%rdx\n"
    /* extract r0 */
    "movq %%rax, 0(%q6)\n"
    /* Move to (r8,r9) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p1 */
    "addq %q2, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += p4 * c1 */
    "movq %8, %%rax\n"
    "mulq %%r10\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* Extract r1 */
    "movq %%r8, 8(%q6)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r8) += p4 */
    "addq %%r10, %%r9\n"
    "adcq $0, %%r8\n"
    /* (r9,r8) += p2 */
    "addq %q3, %%r9\n"
    "adcq $0, %%r8\n"
    /* Extract r2 */
    "movq %%r9, 16(%q6)\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p3 */
    "addq %q4, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract r3 */
    "movq %%r8, 24(%q6)\n"
    /* Extract c */
    "movq %%r9, %q0\n"
    : "=g"(c)
    : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");

    SECP256K1_CHECKMEM_MSAN_DEFINE(r, sizeof(*r));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&c, sizeof(c));

#else
    secp256k1_uint128 c128;
    uint64_t c, c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    uint64_t p0, p1, p2, p3;
    uint32_t p4;

    /* Reduce 512 bits into 385. */
    /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    sumadd_fast(l[1]);
    muladd(n1, SECP256K1_N_C_0);
    muladd(n0, SECP256K1_N_C_1);
    extract(m1);
    sumadd(l[2]);
    muladd(n2, SECP256K1_N_C_0);
    muladd(n1, SECP256K1_N_C_1);
    sumadd(n0);
    extract(m2);
    sumadd(l[3]);
    muladd(n3, SECP256K1_N_C_0);
    muladd(n2, SECP256K1_N_C_1);
    sumadd(n1);
    extract(m3);
    muladd(n3, SECP256K1_N_C_1);
    sumadd(n2);
    extract(m4);
    sumadd_fast(n3);
    extract_fast(m5);
    VERIFY_CHECK(c0 <= 1);
    m6 = c0;

    /* Reduce 385 bits into 258. */
    /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m4, SECP256K1_N_C_0);
    extract_fast(p0);
    sumadd_fast(m1);
    muladd(m5, SECP256K1_N_C_0);
    muladd(m4, SECP256K1_N_C_1);
    extract(p1);
    sumadd(m2);
    muladd(m6, SECP256K1_N_C_0);
    muladd(m5, SECP256K1_N_C_1);
    sumadd(m4);
    extract(p2);
    sumadd_fast(m3);
    muladd_fast(m6, SECP256K1_N_C_1);
    sumadd_fast(m5);
    extract_fast(p3);
    p4 = c0 + m6;
    VERIFY_CHECK(p4 <= 2);

    /* Reduce 258 bits into 256. */
    /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
    secp256k1_u128_from_u64(&c128, p0);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_0, p4);
    r->d[0] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p1);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_1, p4);
    r->d[1] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p2);
    secp256k1_u128_accum_u64(&c128, p4);
    r->d[2] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p3);
    r->d[3] = secp256k1_u128_to_u64(&c128);
    c = secp256k1_u128_hi_u64(&c128);
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}

static void secp256k1_scalar_mul_512(uint64_t *l8, const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    const uint64_t *pb = b->d;
    __asm__ __volatile__(
    /* Preload */
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    /* (rax,rdx) = a0 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    /* Extract l8[0] */
    "movq %%rax, 0(%%rsi)\n"
    /* (r8,r9,r10) = (rdx) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a0 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a1 * b0 */
    "movq %%rbx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l8[1] */
    "movq %%r8, 8(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a0 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a1 * b1 */
    "movq %%rbx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a2 * b0 */
    "movq %%rcx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l8[2] */
    "movq %%r9, 16(%%rsi)\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += a0 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Preload a3 */
    "movq 24(%%rdi), %%r15\n"
    /* (r10,r8,r9) += a1 * b2 */
    "movq %%rbx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a2 * b1 */
    "movq %%rcx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a3 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract l8[3] */
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a1 * b3 */
    "movq %%rbx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a2 * b2 */
    "movq %%rcx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a3 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l8[4] */
    "movq %%r8, 32(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a2 * b3 */
    "movq %%rcx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a3 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l8[5] */
    "movq %%r9, 40(%%rsi)\n"
    /* (r10,r8) += a3 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    /* Extract l8[6] */
    "movq %%r10, 48(%%rsi)\n"
    /* Extract l8[7] */
    "movq %%r8, 56(%%rsi)\n"
    : "+d"(pb)
    : "S"(l8), "D"(a->d)
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");

    SECP256K1_CHECKMEM_MSAN_DEFINE(l8, sizeof(*l8) * 8);

#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l8[0..7] = a[0..3] * b[0..3]. */
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l8[0]);
    muladd(a->d[0], b->d[1]);
    muladd(a->d[1], b->d[0]);
    extract(l8[1]);
    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l8[2]);
    muladd(a->d[0], b->d[3]);
    muladd(a->d[1], b->d[2]);
    muladd(a->d[2], b->d[1]);
    muladd(a->d[3], b->d[0]);
    extract(l8[3]);
    muladd(a->d[1], b->d[3]);
    muladd(a->d[2], b->d[2]);
    muladd(a->d[3], b->d[1]);
    extract(l8[4]);
    muladd(a->d[2], b->d[3]);
    muladd(a->d[3], b->d[2]);
    extract(l8[5]);
    muladd_fast(a->d[3], b->d[3]);
    extract_fast(l8[6]);
    VERIFY_CHECK(c1 == 0);
    l8[7] = c0;
#endif
}

#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef extract
#undef extract_fast

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint64_t l[8];
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);

    SECP256K1_SCALAR_VERIFY(r);
}
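
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): n-1 is congruent to -1 (mod n), so squaring
 * it must give 1. This exercises the full secp256k1_scalar_mul_512 /
 * secp256k1_scalar_reduce_512 path above. */
SECP256K1_INLINE static void secp256k1_scalar_example_mul(void) {
    secp256k1_scalar one, m1, sq;
    secp256k1_scalar_set_int(&one, 1);
    secp256k1_scalar_negate(&m1, &one);  /* m1 = n - 1 */
    secp256k1_scalar_mul(&sq, &m1, &m1); /* (-1)*(-1) = 1 (mod n) */
    VERIFY_CHECK(secp256k1_scalar_is_one(&sq));
}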

static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
    SECP256K1_SCALAR_VERIFY(k);

    r1->d[0] = k->d[0];
    r1->d[1] = k->d[1];
    r1->d[2] = 0;
    r1->d[3] = 0;
    r2->d[0] = k->d[2];
    r2->d[1] = k->d[3];
    r2->d[2] = 0;
    r2->d[3] = 0;

    SECP256K1_SCALAR_VERIFY(r1);
    SECP256K1_SCALAR_VERIFY(r2);
}

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

static SECP256K1_INLINE void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);
    VERIFY_CHECK(shift >= 256);

    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);

    SECP256K1_SCALAR_VERIFY(r);
}
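
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): with a = b = 2^128 the full 512-bit product
 * is exactly 2^256, so shifting right by 256 bits (with round-to-nearest on
 * the dropped bit) yields 1. */
SECP256K1_INLINE static void secp256k1_scalar_example_mul_shift(void) {
    secp256k1_scalar a, r;
    secp256k1_scalar_set_int(&a, 0);
    a.d[2] = 1; /* a = 2^128 */
    secp256k1_scalar_mul_shift_var(&r, &a, &a, 256);
    VERIFY_CHECK(r.d[0] == 1 && (r.d[1] | r.d[2] | r.d[3]) == 0);
}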

static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint64_t mask0, mask1;
    volatile int vflag = flag;
    VERIFY_CHECK(flag == 0 || flag == 1);
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));

    mask0 = vflag + ~((uint64_t)0);
    mask1 = ~mask0;
    r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
    r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
    r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
    r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
    const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];

    /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
     * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
     */
    VERIFY_CHECK(a0 >> 62 == 0);
    VERIFY_CHECK(a1 >> 62 == 0);
    VERIFY_CHECK(a2 >> 62 == 0);
    VERIFY_CHECK(a3 >> 62 == 0);
    VERIFY_CHECK(a4 >> 8 == 0);

    r->d[0] = a0 | a1 << 62;
    r->d[1] = a1 >> 2 | a2 << 60;
    r->d[2] = a2 >> 4 | a3 << 58;
    r->d[3] = a3 >> 6 | a4 << 56;

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
    SECP256K1_SCALAR_VERIFY(a);

    r->v[0] = a0 & M62;
    r->v[1] = (a0 >> 62 | a1 << 2) & M62;
    r->v[2] = (a1 >> 60 | a2 << 4) & M62;
    r->v[3] = (a2 >> 58 | a3 << 6) & M62;
    r->v[4] = a3 >> 56;
}
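
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): the signed62 form is a plain repacking of
 * 256 bits into five 62-bit limbs, so a round trip through both conversions
 * is the identity. */
SECP256K1_INLINE static void secp256k1_scalar_example_signed62_roundtrip(const secp256k1_scalar *a) {
    secp256k1_modinv64_signed62 s;
    secp256k1_scalar t;
    secp256k1_scalar_to_signed62(&s, a);
    secp256k1_scalar_from_signed62(&t, &s);
    VERIFY_CHECK(secp256k1_scalar_eq(&t, a));
}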

static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
    {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
    0x34F20099AA774EC1LL
};

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    SECP256K1_SCALAR_VERIFY(x);

    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}
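
/* Illustrative sketch (editorial addition, not part of the upstream file;
 * the helper name is invented): for any nonzero scalar x, multiplying by the
 * computed inverse must give 1. */
SECP256K1_INLINE static void secp256k1_scalar_example_inverse(const secp256k1_scalar *x) {
    secp256k1_scalar xi, p;
    VERIFY_CHECK(!secp256k1_scalar_is_zero(x));
    secp256k1_scalar_inverse(&xi, x);
    secp256k1_scalar_mul(&p, &xi, x);
    VERIFY_CHECK(secp256k1_scalar_is_one(&p));
}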

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    SECP256K1_SCALAR_VERIFY(x);

    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return !(a->d[0] & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */