scalar_4x64_impl.h
1 /***********************************************************************
2  * Copyright (c) 2013, 2014 Pieter Wuille *
3  * Distributed under the MIT software license, see the accompanying *
4  * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
5  ***********************************************************************/
6 
7 #ifndef SECP256K1_SCALAR_REPR_IMPL_H
8 #define SECP256K1_SCALAR_REPR_IMPL_H
9 
10 #include "checkmem.h"
11 #include "int128.h"
12 #include "modinv64_impl.h"
13 #include "util.h"
14 
15 /* Limbs of the secp256k1 order. */
16 #define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
17 #define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
18 #define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
19 #define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
20 
21 /* Limbs of 2^256 minus the secp256k1 order. */
22 #define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
23 #define SECP256K1_N_C_1 (~SECP256K1_N_1)
24 #define SECP256K1_N_C_2 (1)
25 
26 /* Limbs of half the secp256k1 order. */
27 #define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
28 #define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
29 #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
30 #define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
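/* Example (added for illustration, not from the upstream source): the group order is
 *   n = 0xFFFFFFFFFFFFFFFF FFFFFFFFFFFFFFFE BAAEDCE6AF48A03B BFD25E8CD0364141
 * (limbs SECP256K1_N_3..SECP256K1_N_0 above), and
 *   2^256 - n = 0x1 4551231950B75FC4 402DA1732FC9BEBF,
 * which is exactly (SECP256K1_N_C_2, SECP256K1_N_C_1, SECP256K1_N_C_0). Since
 * 2^256 mod n equals 2^256 - n, a carry out of the top limb can always be folded
 * back in by adding SECP256K1_N_C, which is what secp256k1_scalar_reduce and
 * secp256k1_scalar_reduce_512 below rely on. */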
31 
32 SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
33  r->d[0] = 0;
34  r->d[1] = 0;
35  r->d[2] = 0;
36  r->d[3] = 0;
37 }
38 
39 SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
40  r->d[0] = v;
41  r->d[1] = 0;
42  r->d[2] = 0;
43  r->d[3] = 0;
44 
45  secp256k1_scalar_verify(r);
46 }
47 
48 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
49  secp256k1_scalar_verify(a);
50  VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
51 
52  return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
53 }
54 
55 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
56  secp256k1_scalar_verify(a);
57  VERIFY_CHECK(count < 32);
58  VERIFY_CHECK(offset + count <= 256);
59 
60  if ((offset + count - 1) >> 6 == offset >> 6) {
61  return secp256k1_scalar_get_bits(a, offset, count);
62  } else {
63  VERIFY_CHECK((offset >> 6) + 1 < 4);
64  return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
65  }
66 }
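/* Example (added for illustration, not from the upstream source): reading bits that
 * straddle a limb boundary. With offset = 62 and count = 4, the low 2 requested bits
 * come from the top of d[0] and the high 2 bits from the bottom of d[1]:
 *
 *   ((a->d[0] >> 62) | (a->d[1] << 2)) & 0xF
 *
 * which is the second branch of secp256k1_scalar_get_bits_var above. The plain
 * secp256k1_scalar_get_bits may only be called when all requested bits live in a
 * single 64-bit limb. */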
67 
68 SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
69  int yes = 0;
70  int no = 0;
71  no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
72  no |= (a->d[2] < SECP256K1_N_2);
73  yes |= (a->d[2] > SECP256K1_N_2) & ~no;
74  no |= (a->d[1] < SECP256K1_N_1);
75  yes |= (a->d[1] > SECP256K1_N_1) & ~no;
76  yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
77  return yes;
78 }
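/* Example (added for illustration, not from the upstream source): the yes/no flags
 * above implement a branchless lexicographic comparison from the most significant
 * limb down, computing (a >= n). For a = n itself, every "<" test fails, every ">"
 * test fails, and the final (d[0] >= N_0) test sets yes = 1, so the value is flagged
 * as overflowing and must be reduced. */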
79 
80 SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
81  secp256k1_uint128 t;
82  VERIFY_CHECK(overflow <= 1);
83 
84  secp256k1_u128_from_u64(&t, r->d[0]);
85  secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
86  r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
87  secp256k1_u128_accum_u64(&t, r->d[1]);
88  secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_1);
89  r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
90  secp256k1_u128_accum_u64(&t, r->d[2]);
91  secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_2);
92  r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
93  secp256k1_u128_accum_u64(&t, r->d[3]);
94  r->d[3] = secp256k1_u128_to_u64(&t);
95 
96  secp256k1_scalar_verify(r);
97  return overflow;
98 }
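/* Example (added for illustration, not from the upstream source): reducing r = n
 * itself. check_overflow reports 1, so the code above adds overflow * SECP256K1_N_C
 * = 2^256 - n limb by limb: n + (2^256 - n) = 2^256, whose low 256 bits are all
 * zero, leaving r = 0, which is indeed n mod n. Any r < 2^256 with overflow <= 1 is
 * handled the same way, since r + (2^256 - n) wraps to r - n. */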
99 
100 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
101  int overflow;
102  secp256k1_uint128 t;
103  secp256k1_scalar_verify(a);
104  secp256k1_scalar_verify(b);
105 
106  secp256k1_u128_from_u64(&t, a->d[0]);
107  secp256k1_u128_accum_u64(&t, b->d[0]);
108  r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
109  secp256k1_u128_accum_u64(&t, a->d[1]);
110  secp256k1_u128_accum_u64(&t, b->d[1]);
111  r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
112  secp256k1_u128_accum_u64(&t, a->d[2]);
113  secp256k1_u128_accum_u64(&t, b->d[2]);
114  r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
115  secp256k1_u128_accum_u64(&t, a->d[3]);
116  secp256k1_u128_accum_u64(&t, b->d[3]);
117  r->d[3] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
118  overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
119  VERIFY_CHECK(overflow == 0 || overflow == 1);
120  secp256k1_scalar_reduce(r, overflow);
121 
122  secp256k1_scalar_verify(r);
123  return overflow;
124 }
125 
126 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
127  secp256k1_uint128 t;
128  volatile int vflag = flag;
129  secp256k1_scalar_verify(r);
130  VERIFY_CHECK(bit < 256);
131 
132  bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
133  secp256k1_u128_from_u64(&t, r->d[0]);
134  secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
135  r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
136  secp256k1_u128_accum_u64(&t, r->d[1]);
137  secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
138  r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
139  secp256k1_u128_accum_u64(&t, r->d[2]);
140  secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
141  r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
142  secp256k1_u128_accum_u64(&t, r->d[3]);
143  secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
144  r->d[3] = secp256k1_u128_to_u64(&t);
145 
146  secp256k1_scalar_verify(r);
147 #ifdef VERIFY
148  VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
149 #endif
150 }
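/* Example (added for illustration, not from the upstream source): how the vflag
 * trick above makes the add conditional without branching. With flag = 0,
 * (uint32_t)vflag - 1 is 0xFFFFFFFF, so bit is increased by 0x100; then (bit >> 6)
 * is at least 4 and none of the ((bit >> 6) == k) selectors fire, so only zeros are
 * added. With flag = 1 the mask is 0 and exactly one limb receives 1 << (bit & 0x3F).
 * The volatile qualifier discourages the compiler from turning this back into a
 * branch. */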
151 
152 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
153  int over;
154  r->d[0] = secp256k1_read_be64(&b32[24]);
155  r->d[1] = secp256k1_read_be64(&b32[16]);
156  r->d[2] = secp256k1_read_be64(&b32[8]);
157  r->d[3] = secp256k1_read_be64(&b32[0]);
158  over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
159  if (overflow) {
160  *overflow = over;
161  }
162 
163  secp256k1_scalar_verify(r);
164 }
165 
166 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
167  secp256k1_scalar_verify(a);
168 
169  secp256k1_write_be64(&bin[0], a->d[3]);
170  secp256k1_write_be64(&bin[8], a->d[2]);
171  secp256k1_write_be64(&bin[16], a->d[1]);
172  secp256k1_write_be64(&bin[24], a->d[0]);
173 }
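/* Example usage (added for illustration; hypothetical helper name, left disabled,
 * not part of the library): parse 32 big-endian bytes into a scalar and serialize it
 * back. The round trip matches the input whenever the value is below the group
 * order; otherwise *overflow is set and the stored value is the input minus n. */
#if 0
static int secp256k1_scalar_b32_roundtrip_example(const unsigned char in[32]) {
    secp256k1_scalar s;
    unsigned char out[32];
    int overflow, i, same = 1;
    secp256k1_scalar_set_b32(&s, in, &overflow);   /* reduces mod n, reports overflow */
    secp256k1_scalar_get_b32(out, &s);             /* serialize back to big-endian bytes */
    for (i = 0; i < 32; i++) same &= (in[i] == out[i]);
    return same && !overflow;                      /* 1 iff the input was already canonical */
}
#endif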
174 
175 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
176  secp256k1_scalar_verify(a);
177 
178  return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
179 }
180 
181 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
182  uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
183  secp256k1_uint128 t;
184  secp256k1_scalar_verify(a);
185 
186  secp256k1_u128_from_u64(&t, ~a->d[0]);
187  secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
188  r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
189  secp256k1_u128_accum_u64(&t, ~a->d[1]);
190  secp256k1_u128_accum_u64(&t, SECP256K1_N_1);
191  r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
192  secp256k1_u128_accum_u64(&t, ~a->d[2]);
193  secp256k1_u128_accum_u64(&t, SECP256K1_N_2);
194  r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
195  secp256k1_u128_accum_u64(&t, ~a->d[3]);
196  secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
197  r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
198 
199  secp256k1_scalar_verify(r);
200 }
201 
202 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
203  secp256k1_scalar_verify(a);
204 
205  return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
206 }
207 
208 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
209  int yes = 0;
210  int no = 0;
211  secp256k1_scalar_verify(a);
212 
213  no |= (a->d[3] < SECP256K1_N_H_3);
214  yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
215  no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
216  no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
217  yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
218  yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
219  return yes;
220 }
221 
222 static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
223  /* If we are flag = 0, mask = 00...00 and this is a no-op;
224  * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */
225  volatile int vflag = flag;
226  uint64_t mask = -vflag;
227  uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
228  secp256k1_uint128 t;
229  secp256k1_scalar_verify(r);
230 
231  secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
232  secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
233  r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
234  secp256k1_u128_accum_u64(&t, r->d[1] ^ mask);
235  secp256k1_u128_accum_u64(&t, SECP256K1_N_1 & mask);
236  r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
237  secp256k1_u128_accum_u64(&t, r->d[2] ^ mask);
238  secp256k1_u128_accum_u64(&t, SECP256K1_N_2 & mask);
239  r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
240  secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
241  secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
242  r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
243 
244  secp256k1_scalar_verify(r);
245  return 2 * (mask == 0) - 1;
246 }
247 
248 /* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
249 
250 /** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
251 #define muladd(a,b) { \
252  uint64_t tl, th; \
253  { \
254  secp256k1_uint128 t; \
255  secp256k1_u128_mul(&t, a, b); \
256  th = secp256k1_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \
257  tl = secp256k1_u128_to_u64(&t); \
258  } \
259  c0 += tl; /* overflow is handled on the next line */ \
260  th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
261  c1 += th; /* overflow is handled on the next line */ \
262  c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \
263  VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
264 }
265 
266 /** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
267 #define muladd_fast(a,b) { \
268  uint64_t tl, th; \
269  { \
270  secp256k1_uint128 t; \
271  secp256k1_u128_mul(&t, a, b); \
272  th = secp256k1_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \
273  tl = secp256k1_u128_to_u64(&t); \
274  } \
275  c0 += tl; /* overflow is handled on the next line */ \
276  th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
277  c1 += th; /* never overflows by contract (verified in the next line) */ \
278  VERIFY_CHECK(c1 >= th); \
279 }
280 
281 /** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
282 #define sumadd(a) { \
283  unsigned int over; \
284  c0 += (a); /* overflow is handled on the next line */ \
285  over = (c0 < (a)); \
286  c1 += over; /* overflow is handled on the next line */ \
287  c2 += (c1 < over); /* never overflows by contract */ \
288 }
289 
290 /** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
291 #define sumadd_fast(a) { \
292  c0 += (a); /* overflow is handled on the next line */ \
293  c1 += (c0 < (a)); /* never overflows by contract (verified the next line) */ \
294  VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
295  VERIFY_CHECK(c2 == 0); \
296 }
297 
298 /** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
299 #define extract(n) { \
300  (n) = c0; \
301  c0 = c1; \
302  c1 = c2; \
303  c2 = 0; \
304 }
305 
306 /** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
307 #define extract_fast(n) { \
308  (n) = c0; \
309  c0 = c1; \
310  c1 = 0; \
311  VERIFY_CHECK(c2 == 0); \
312 }
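/* Example (added for illustration, not from the upstream source): computing one
 * column of a schoolbook product with the accumulator macros above. To obtain limb
 * 1 of a*b, i.e. the 64-bit chunk holding a0*b1 + a1*b0 plus the carry from limb 0,
 * secp256k1_scalar_mul_512 below does:
 *
 *   muladd(a->d[0], b->d[1]);
 *   muladd(a->d[1], b->d[0]);
 *   extract(l[1]);
 *
 * (c0,c1,c2) acts as a 192-bit accumulator: muladd folds a 128-bit product into it,
 * and extract pops the low 64 bits and shifts the rest down so the next column can
 * be accumulated. The *_fast variants skip the c2 limb when the column is known not
 * to need it. */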
313 
314 static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
315 #ifdef USE_ASM_X86_64
316  /* Reduce 512 bits into 385. */
317  uint64_t m0, m1, m2, m3, m4, m5, m6;
318  uint64_t p0, p1, p2, p3, p4;
319  uint64_t c;
320 
321  __asm__ __volatile__(
322  /* Preload. */
323  "movq 32(%%rsi), %%r11\n"
324  "movq 40(%%rsi), %%r12\n"
325  "movq 48(%%rsi), %%r13\n"
326  "movq 56(%%rsi), %%r14\n"
327  /* Initialize r8,r9,r10 */
328  "movq 0(%%rsi), %%r8\n"
329  "xorq %%r9, %%r9\n"
330  "xorq %%r10, %%r10\n"
331  /* (r8,r9) += n0 * c0 */
332  "movq %8, %%rax\n"
333  "mulq %%r11\n"
334  "addq %%rax, %%r8\n"
335  "adcq %%rdx, %%r9\n"
336  /* extract m0 */
337  "movq %%r8, %q0\n"
338  "xorq %%r8, %%r8\n"
339  /* (r9,r10) += l1 */
340  "addq 8(%%rsi), %%r9\n"
341  "adcq $0, %%r10\n"
342  /* (r9,r10,r8) += n1 * c0 */
343  "movq %8, %%rax\n"
344  "mulq %%r12\n"
345  "addq %%rax, %%r9\n"
346  "adcq %%rdx, %%r10\n"
347  "adcq $0, %%r8\n"
348  /* (r9,r10,r8) += n0 * c1 */
349  "movq %9, %%rax\n"
350  "mulq %%r11\n"
351  "addq %%rax, %%r9\n"
352  "adcq %%rdx, %%r10\n"
353  "adcq $0, %%r8\n"
354  /* extract m1 */
355  "movq %%r9, %q1\n"
356  "xorq %%r9, %%r9\n"
357  /* (r10,r8,r9) += l2 */
358  "addq 16(%%rsi), %%r10\n"
359  "adcq $0, %%r8\n"
360  "adcq $0, %%r9\n"
361  /* (r10,r8,r9) += n2 * c0 */
362  "movq %8, %%rax\n"
363  "mulq %%r13\n"
364  "addq %%rax, %%r10\n"
365  "adcq %%rdx, %%r8\n"
366  "adcq $0, %%r9\n"
367  /* (r10,r8,r9) += n1 * c1 */
368  "movq %9, %%rax\n"
369  "mulq %%r12\n"
370  "addq %%rax, %%r10\n"
371  "adcq %%rdx, %%r8\n"
372  "adcq $0, %%r9\n"
373  /* (r10,r8,r9) += n0 */
374  "addq %%r11, %%r10\n"
375  "adcq $0, %%r8\n"
376  "adcq $0, %%r9\n"
377  /* extract m2 */
378  "movq %%r10, %q2\n"
379  "xorq %%r10, %%r10\n"
380  /* (r8,r9,r10) += l3 */
381  "addq 24(%%rsi), %%r8\n"
382  "adcq $0, %%r9\n"
383  "adcq $0, %%r10\n"
384  /* (r8,r9,r10) += n3 * c0 */
385  "movq %8, %%rax\n"
386  "mulq %%r14\n"
387  "addq %%rax, %%r8\n"
388  "adcq %%rdx, %%r9\n"
389  "adcq $0, %%r10\n"
390  /* (r8,r9,r10) += n2 * c1 */
391  "movq %9, %%rax\n"
392  "mulq %%r13\n"
393  "addq %%rax, %%r8\n"
394  "adcq %%rdx, %%r9\n"
395  "adcq $0, %%r10\n"
396  /* (r8,r9,r10) += n1 */
397  "addq %%r12, %%r8\n"
398  "adcq $0, %%r9\n"
399  "adcq $0, %%r10\n"
400  /* extract m3 */
401  "movq %%r8, %q3\n"
402  "xorq %%r8, %%r8\n"
403  /* (r9,r10,r8) += n3 * c1 */
404  "movq %9, %%rax\n"
405  "mulq %%r14\n"
406  "addq %%rax, %%r9\n"
407  "adcq %%rdx, %%r10\n"
408  "adcq $0, %%r8\n"
409  /* (r9,r10,r8) += n2 */
410  "addq %%r13, %%r9\n"
411  "adcq $0, %%r10\n"
412  "adcq $0, %%r8\n"
413  /* extract m4 */
414  "movq %%r9, %q4\n"
415  /* (r10,r8) += n3 */
416  "addq %%r14, %%r10\n"
417  "adcq $0, %%r8\n"
418  /* extract m5 */
419  "movq %%r10, %q5\n"
420  /* extract m6 */
421  "movq %%r8, %q6\n"
422  : "=&g"(m0), "=&g"(m1), "=&g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
423  : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
424  : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
425 
426  /* Reduce 385 bits into 258. */
427  __asm__ __volatile__(
428  /* Preload */
429  "movq %q9, %%r11\n"
430  "movq %q10, %%r12\n"
431  "movq %q11, %%r13\n"
432  /* Initialize (r8,r9,r10) */
433  "movq %q5, %%r8\n"
434  "xorq %%r9, %%r9\n"
435  "xorq %%r10, %%r10\n"
436  /* (r8,r9) += m4 * c0 */
437  "movq %12, %%rax\n"
438  "mulq %%r11\n"
439  "addq %%rax, %%r8\n"
440  "adcq %%rdx, %%r9\n"
441  /* extract p0 */
442  "movq %%r8, %q0\n"
443  "xorq %%r8, %%r8\n"
444  /* (r9,r10) += m1 */
445  "addq %q6, %%r9\n"
446  "adcq $0, %%r10\n"
447  /* (r9,r10,r8) += m5 * c0 */
448  "movq %12, %%rax\n"
449  "mulq %%r12\n"
450  "addq %%rax, %%r9\n"
451  "adcq %%rdx, %%r10\n"
452  "adcq $0, %%r8\n"
453  /* (r9,r10,r8) += m4 * c1 */
454  "movq %13, %%rax\n"
455  "mulq %%r11\n"
456  "addq %%rax, %%r9\n"
457  "adcq %%rdx, %%r10\n"
458  "adcq $0, %%r8\n"
459  /* extract p1 */
460  "movq %%r9, %q1\n"
461  "xorq %%r9, %%r9\n"
462  /* (r10,r8,r9) += m2 */
463  "addq %q7, %%r10\n"
464  "adcq $0, %%r8\n"
465  "adcq $0, %%r9\n"
466  /* (r10,r8,r9) += m6 * c0 */
467  "movq %12, %%rax\n"
468  "mulq %%r13\n"
469  "addq %%rax, %%r10\n"
470  "adcq %%rdx, %%r8\n"
471  "adcq $0, %%r9\n"
472  /* (r10,r8,r9) += m5 * c1 */
473  "movq %13, %%rax\n"
474  "mulq %%r12\n"
475  "addq %%rax, %%r10\n"
476  "adcq %%rdx, %%r8\n"
477  "adcq $0, %%r9\n"
478  /* (r10,r8,r9) += m4 */
479  "addq %%r11, %%r10\n"
480  "adcq $0, %%r8\n"
481  "adcq $0, %%r9\n"
482  /* extract p2 */
483  "movq %%r10, %q2\n"
484  /* (r8,r9) += m3 */
485  "addq %q8, %%r8\n"
486  "adcq $0, %%r9\n"
487  /* (r8,r9) += m6 * c1 */
488  "movq %13, %%rax\n"
489  "mulq %%r13\n"
490  "addq %%rax, %%r8\n"
491  "adcq %%rdx, %%r9\n"
492  /* (r8,r9) += m5 */
493  "addq %%r12, %%r8\n"
494  "adcq $0, %%r9\n"
495  /* extract p3 */
496  "movq %%r8, %q3\n"
497  /* (r9) += m6 */
498  "addq %%r13, %%r9\n"
499  /* extract p4 */
500  "movq %%r9, %q4\n"
501  : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
502  : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
503  : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
504 
505  /* Reduce 258 bits into 256. */
506  __asm__ __volatile__(
507  /* Preload */
508  "movq %q5, %%r10\n"
509  /* (rax,rdx) = p4 * c0 */
510  "movq %7, %%rax\n"
511  "mulq %%r10\n"
512  /* (rax,rdx) += p0 */
513  "addq %q1, %%rax\n"
514  "adcq $0, %%rdx\n"
515  /* extract r0 */
516  "movq %%rax, 0(%q6)\n"
517  /* Move to (r8,r9) */
518  "movq %%rdx, %%r8\n"
519  "xorq %%r9, %%r9\n"
520  /* (r8,r9) += p1 */
521  "addq %q2, %%r8\n"
522  "adcq $0, %%r9\n"
523  /* (r8,r9) += p4 * c1 */
524  "movq %8, %%rax\n"
525  "mulq %%r10\n"
526  "addq %%rax, %%r8\n"
527  "adcq %%rdx, %%r9\n"
528  /* Extract r1 */
529  "movq %%r8, 8(%q6)\n"
530  "xorq %%r8, %%r8\n"
531  /* (r9,r8) += p4 */
532  "addq %%r10, %%r9\n"
533  "adcq $0, %%r8\n"
534  /* (r9,r8) += p2 */
535  "addq %q3, %%r9\n"
536  "adcq $0, %%r8\n"
537  /* Extract r2 */
538  "movq %%r9, 16(%q6)\n"
539  "xorq %%r9, %%r9\n"
540  /* (r8,r9) += p3 */
541  "addq %q4, %%r8\n"
542  "adcq $0, %%r9\n"
543  /* Extract r3 */
544  "movq %%r8, 24(%q6)\n"
545  /* Extract c */
546  "movq %%r9, %q0\n"
547  : "=g"(c)
548  : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
549  : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
550 #else
551  secp256k1_uint128 c128;
552  uint64_t c, c0, c1, c2;
553  uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
554  uint64_t m0, m1, m2, m3, m4, m5;
555  uint32_t m6;
556  uint64_t p0, p1, p2, p3;
557  uint32_t p4;
558 
559  /* Reduce 512 bits into 385. */
560  /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
561  c0 = l[0]; c1 = 0; c2 = 0;
562  muladd_fast(n0, SECP256K1_N_C_0);
563  extract_fast(m0);
564  sumadd_fast(l[1]);
565  muladd(n1, SECP256K1_N_C_0);
566  muladd(n0, SECP256K1_N_C_1);
567  extract(m1);
568  sumadd(l[2]);
569  muladd(n2, SECP256K1_N_C_0);
570  muladd(n1, SECP256K1_N_C_1);
571  sumadd(n0);
572  extract(m2);
573  sumadd(l[3]);
574  muladd(n3, SECP256K1_N_C_0);
575  muladd(n2, SECP256K1_N_C_1);
576  sumadd(n1);
577  extract(m3);
578  muladd(n3, SECP256K1_N_C_1);
579  sumadd(n2);
580  extract(m4);
581  sumadd_fast(n3);
582  extract_fast(m5);
583  VERIFY_CHECK(c0 <= 1);
584  m6 = c0;
585 
586  /* Reduce 385 bits into 258. */
587  /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
588  c0 = m0; c1 = 0; c2 = 0;
589  muladd_fast(m4, SECP256K1_N_C_0);
590  extract_fast(p0);
591  sumadd_fast(m1);
592  muladd(m5, SECP256K1_N_C_0);
593  muladd(m4, SECP256K1_N_C_1);
594  extract(p1);
595  sumadd(m2);
596  muladd(m6, SECP256K1_N_C_0);
597  muladd(m5, SECP256K1_N_C_1);
598  sumadd(m4);
599  extract(p2);
600  sumadd_fast(m3);
601  muladd_fast(m6, SECP256K1_N_C_1);
602  sumadd_fast(m5);
603  extract_fast(p3);
604  p4 = c0 + m6;
605  VERIFY_CHECK(p4 <= 2);
606 
607  /* Reduce 258 bits into 256. */
608  /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
609  secp256k1_u128_from_u64(&c128, p0);
610  secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_0, p4);
611  r->d[0] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
612  secp256k1_u128_accum_u64(&c128, p1);
613  secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_1, p4);
614  r->d[1] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
615  secp256k1_u128_accum_u64(&c128, p2);
616  secp256k1_u128_accum_u64(&c128, p4);
617  r->d[2] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
618  secp256k1_u128_accum_u64(&c128, p3);
619  r->d[3] = secp256k1_u128_to_u64(&c128);
620  c = secp256k1_u128_hi_u64(&c128);
621 #endif
622 
623  /* Final reduction of r. */
624  secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
625 }
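/* Example (added for illustration, not from the upstream source): why three passes
 * suffice above. Writing the 512-bit product as l = low256 + 2^256 * high256 and
 * using 2^256 = n + N_C gives l = low256 + high256 * N_C (mod n). Since N_C is only
 * slightly above 2^128, each such fold shrinks the value: 512 bits -> at most 385
 * bits (m), then -> at most 258 bits (p), then -> a 256-bit value plus a tiny carry
 * c, after which a single conditional subtraction of n (secp256k1_scalar_reduce)
 * finishes the job. */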
626 
627 static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
628 #ifdef USE_ASM_X86_64
629  const uint64_t *pb = b->d;
630  __asm__ __volatile__(
631  /* Preload */
632  "movq 0(%%rdi), %%r15\n"
633  "movq 8(%%rdi), %%rbx\n"
634  "movq 16(%%rdi), %%rcx\n"
635  "movq 0(%%rdx), %%r11\n"
636  "movq 8(%%rdx), %%r12\n"
637  "movq 16(%%rdx), %%r13\n"
638  "movq 24(%%rdx), %%r14\n"
639  /* (rax,rdx) = a0 * b0 */
640  "movq %%r15, %%rax\n"
641  "mulq %%r11\n"
642  /* Extract l0 */
643  "movq %%rax, 0(%%rsi)\n"
644  /* (r8,r9,r10) = (rdx) */
645  "movq %%rdx, %%r8\n"
646  "xorq %%r9, %%r9\n"
647  "xorq %%r10, %%r10\n"
648  /* (r8,r9,r10) += a0 * b1 */
649  "movq %%r15, %%rax\n"
650  "mulq %%r12\n"
651  "addq %%rax, %%r8\n"
652  "adcq %%rdx, %%r9\n"
653  "adcq $0, %%r10\n"
654  /* (r8,r9,r10) += a1 * b0 */
655  "movq %%rbx, %%rax\n"
656  "mulq %%r11\n"
657  "addq %%rax, %%r8\n"
658  "adcq %%rdx, %%r9\n"
659  "adcq $0, %%r10\n"
660  /* Extract l1 */
661  "movq %%r8, 8(%%rsi)\n"
662  "xorq %%r8, %%r8\n"
663  /* (r9,r10,r8) += a0 * b2 */
664  "movq %%r15, %%rax\n"
665  "mulq %%r13\n"
666  "addq %%rax, %%r9\n"
667  "adcq %%rdx, %%r10\n"
668  "adcq $0, %%r8\n"
669  /* (r9,r10,r8) += a1 * b1 */
670  "movq %%rbx, %%rax\n"
671  "mulq %%r12\n"
672  "addq %%rax, %%r9\n"
673  "adcq %%rdx, %%r10\n"
674  "adcq $0, %%r8\n"
675  /* (r9,r10,r8) += a2 * b0 */
676  "movq %%rcx, %%rax\n"
677  "mulq %%r11\n"
678  "addq %%rax, %%r9\n"
679  "adcq %%rdx, %%r10\n"
680  "adcq $0, %%r8\n"
681  /* Extract l2 */
682  "movq %%r9, 16(%%rsi)\n"
683  "xorq %%r9, %%r9\n"
684  /* (r10,r8,r9) += a0 * b3 */
685  "movq %%r15, %%rax\n"
686  "mulq %%r14\n"
687  "addq %%rax, %%r10\n"
688  "adcq %%rdx, %%r8\n"
689  "adcq $0, %%r9\n"
690  /* Preload a3 */
691  "movq 24(%%rdi), %%r15\n"
692  /* (r10,r8,r9) += a1 * b2 */
693  "movq %%rbx, %%rax\n"
694  "mulq %%r13\n"
695  "addq %%rax, %%r10\n"
696  "adcq %%rdx, %%r8\n"
697  "adcq $0, %%r9\n"
698  /* (r10,r8,r9) += a2 * b1 */
699  "movq %%rcx, %%rax\n"
700  "mulq %%r12\n"
701  "addq %%rax, %%r10\n"
702  "adcq %%rdx, %%r8\n"
703  "adcq $0, %%r9\n"
704  /* (r10,r8,r9) += a3 * b0 */
705  "movq %%r15, %%rax\n"
706  "mulq %%r11\n"
707  "addq %%rax, %%r10\n"
708  "adcq %%rdx, %%r8\n"
709  "adcq $0, %%r9\n"
710  /* Extract l3 */
711  "movq %%r10, 24(%%rsi)\n"
712  "xorq %%r10, %%r10\n"
713  /* (r8,r9,r10) += a1 * b3 */
714  "movq %%rbx, %%rax\n"
715  "mulq %%r14\n"
716  "addq %%rax, %%r8\n"
717  "adcq %%rdx, %%r9\n"
718  "adcq $0, %%r10\n"
719  /* (r8,r9,r10) += a2 * b2 */
720  "movq %%rcx, %%rax\n"
721  "mulq %%r13\n"
722  "addq %%rax, %%r8\n"
723  "adcq %%rdx, %%r9\n"
724  "adcq $0, %%r10\n"
725  /* (r8,r9,r10) += a3 * b1 */
726  "movq %%r15, %%rax\n"
727  "mulq %%r12\n"
728  "addq %%rax, %%r8\n"
729  "adcq %%rdx, %%r9\n"
730  "adcq $0, %%r10\n"
731  /* Extract l4 */
732  "movq %%r8, 32(%%rsi)\n"
733  "xorq %%r8, %%r8\n"
734  /* (r9,r10,r8) += a2 * b3 */
735  "movq %%rcx, %%rax\n"
736  "mulq %%r14\n"
737  "addq %%rax, %%r9\n"
738  "adcq %%rdx, %%r10\n"
739  "adcq $0, %%r8\n"
740  /* (r9,r10,r8) += a3 * b2 */
741  "movq %%r15, %%rax\n"
742  "mulq %%r13\n"
743  "addq %%rax, %%r9\n"
744  "adcq %%rdx, %%r10\n"
745  "adcq $0, %%r8\n"
746  /* Extract l5 */
747  "movq %%r9, 40(%%rsi)\n"
748  /* (r10,r8) += a3 * b3 */
749  "movq %%r15, %%rax\n"
750  "mulq %%r14\n"
751  "addq %%rax, %%r10\n"
752  "adcq %%rdx, %%r8\n"
753  /* Extract l6 */
754  "movq %%r10, 48(%%rsi)\n"
755  /* Extract l7 */
756  "movq %%r8, 56(%%rsi)\n"
757  : "+d"(pb)
758  : "S"(l), "D"(a->d)
759  : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
760 #else
761  /* 160 bit accumulator. */
762  uint64_t c0 = 0, c1 = 0;
763  uint32_t c2 = 0;
764 
765  /* l[0..7] = a[0..3] * b[0..3]. */
766  muladd_fast(a->d[0], b->d[0]);
767  extract_fast(l[0]);
768  muladd(a->d[0], b->d[1]);
769  muladd(a->d[1], b->d[0]);
770  extract(l[1]);
771  muladd(a->d[0], b->d[2]);
772  muladd(a->d[1], b->d[1]);
773  muladd(a->d[2], b->d[0]);
774  extract(l[2]);
775  muladd(a->d[0], b->d[3]);
776  muladd(a->d[1], b->d[2]);
777  muladd(a->d[2], b->d[1]);
778  muladd(a->d[3], b->d[0]);
779  extract(l[3]);
780  muladd(a->d[1], b->d[3]);
781  muladd(a->d[2], b->d[2]);
782  muladd(a->d[3], b->d[1]);
783  extract(l[4]);
784  muladd(a->d[2], b->d[3]);
785  muladd(a->d[3], b->d[2]);
786  extract(l[5]);
787  muladd_fast(a->d[3], b->d[3]);
788  extract_fast(l[6]);
789  VERIFY_CHECK(c1 == 0);
790  l[7] = c0;
791 #endif
792 }
793 
794 #undef sumadd
795 #undef sumadd_fast
796 #undef muladd
797 #undef muladd_fast
798 #undef extract
799 #undef extract_fast
800 
801 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
802  uint64_t l[8];
803  secp256k1_scalar_verify(a);
804  secp256k1_scalar_verify(b);
805 
806  secp256k1_scalar_mul_512(l, a, b);
807  secp256k1_scalar_reduce_512(r, l);
808 
809  secp256k1_scalar_verify(r);
810 }
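/* Example usage (added for illustration; hypothetical helper name, left disabled,
 * not part of the library): a small sanity check that 5 * 7 = 35 as scalars. */
#if 0
static int secp256k1_scalar_mul_example(void) {
    secp256k1_scalar five, seven, product;
    secp256k1_scalar_set_int(&five, 5);
    secp256k1_scalar_set_int(&seven, 7);
    secp256k1_scalar_mul(&product, &five, &seven);
    /* 35 fits in the lowest limb; the product is far below the group order. */
    return product.d[0] == 35 && product.d[1] == 0 && product.d[2] == 0 && product.d[3] == 0;
}
#endif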
811 
812 static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
813  int ret;
814  secp256k1_scalar_verify(r);
815  VERIFY_CHECK(n > 0);
816  VERIFY_CHECK(n < 16);
817 
818  ret = r->d[0] & ((1 << n) - 1);
819  r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
820  r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
821  r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
822  r->d[3] = (r->d[3] >> n);
823 
824  secp256k1_scalar_verify(r);
825  return ret;
826 }
827 
828 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
829  secp256k1_scalar_verify(k);
830 
831  r1->d[0] = k->d[0];
832  r1->d[1] = k->d[1];
833  r1->d[2] = 0;
834  r1->d[3] = 0;
835  r2->d[0] = k->d[2];
836  r2->d[1] = k->d[3];
837  r2->d[2] = 0;
838  r2->d[3] = 0;
839 
840  secp256k1_scalar_verify(r1);
841  secp256k1_scalar_verify(r2);
842 }
843 
844 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
845  secp256k1_scalar_verify(a);
846  secp256k1_scalar_verify(b);
847 
848  return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
849 }
850 
851 SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
852  uint64_t l[8];
853  unsigned int shiftlimbs;
854  unsigned int shiftlow;
855  unsigned int shifthigh;
856  secp256k1_scalar_verify(a);
857  secp256k1_scalar_verify(b);
858  VERIFY_CHECK(shift >= 256);
859 
860  secp256k1_scalar_mul_512(l, a, b);
861  shiftlimbs = shift >> 6;
862  shiftlow = shift & 0x3F;
863  shifthigh = 64 - shiftlow;
864  r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
865  r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
866  r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
867  r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
868  secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
869 
870  secp256k1_scalar_verify(r);
871 }
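/* Example (added for illustration, not from the upstream source):
 * secp256k1_scalar_mul_shift_var returns round((a * b) / 2^shift) for shift >= 256.
 * The shifted limbs select bits [shift, shift+256) of the 512-bit product, and the
 * final cadd_bit adds 1 when bit (shift - 1) is set, i.e. when the discarded
 * fraction is at least one half. The lambda (endomorphism) scalar decomposition
 * relies on such rounded multiplications by fixed constants. */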
872 
873 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
874  uint64_t mask0, mask1;
875  volatile int vflag = flag;
876  secp256k1_scalar_verify(a);
877  SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
878 
879  mask0 = vflag + ~((uint64_t)0);
880  mask1 = ~mask0;
881  r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
882  r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
883  r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
884  r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
885 
886  secp256k1_scalar_verify(r);
887 }
888 
889 static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
890  const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
891 
892  /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
893  * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
894  */
895  VERIFY_CHECK(a0 >> 62 == 0);
896  VERIFY_CHECK(a1 >> 62 == 0);
897  VERIFY_CHECK(a2 >> 62 == 0);
898  VERIFY_CHECK(a3 >> 62 == 0);
899  VERIFY_CHECK(a4 >> 8 == 0);
900 
901  r->d[0] = a0 | a1 << 62;
902  r->d[1] = a1 >> 2 | a2 << 60;
903  r->d[2] = a2 >> 4 | a3 << 58;
904  r->d[3] = a3 >> 6 | a4 << 56;
905 
906  secp256k1_scalar_verify(r);
907 }
908 
909 static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
910  const uint64_t M62 = UINT64_MAX >> 2;
911  const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
912  secp256k1_scalar_verify(a);
913 
914  r->v[0] = a0 & M62;
915  r->v[1] = (a0 >> 62 | a1 << 2) & M62;
916  r->v[2] = (a1 >> 60 | a2 << 4) & M62;
917  r->v[3] = (a2 >> 58 | a3 << 6) & M62;
918  r->v[4] = a3 >> 56;
919 }
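/* Example (added for illustration, not from the upstream source): the signed62
 * conversions above repack the same 256 bits between two limb layouts:
 *
 *   d[0..3]: 4 x 64-bit limbs            (bits 0..63, 64..127, 128..191, 192..255)
 *   v[0..4]: 5 x 62-bit signed limbs     (bits 0..61, 62..123, 124..185, 186..247, 248..255)
 *
 * so, e.g., v[1] takes the top 2 bits of d[0] and the low 60 bits of d[1], and the
 * last limb v[4] holds only 8 bits, which is what the VERIFY_CHECK(a4 >> 8 == 0) in
 * secp256k1_scalar_from_signed62 asserts. */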
920 
921 static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
922  {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
923  0x34F20099AA774EC1LL
924 };
925 
926 static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
927  secp256k1_modinv64_signed62 s;
928 #ifdef VERIFY
929  int zero_in = secp256k1_scalar_is_zero(x);
930 #endif
931  secp256k1_scalar_verify(x);
932 
933  secp256k1_scalar_to_signed62(&s, x);
934  secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
935  secp256k1_scalar_from_signed62(r, &s);
936 
937  secp256k1_scalar_verify(r);
938 #ifdef VERIFY
939  VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
940 #endif
941 }
942 
943 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
944  secp256k1_modinv64_signed62 s;
945 #ifdef VERIFY
946  int zero_in = secp256k1_scalar_is_zero(x);
947 #endif
948  secp256k1_scalar_verify(x);
949 
950  secp256k1_scalar_to_signed62(&s, x);
951  secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
952  secp256k1_scalar_from_signed62(r, &s);
953 
954  secp256k1_scalar_verify(r);
955 #ifdef VERIFY
956  VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
957 #endif
958 }
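/* Example usage (added for illustration; hypothetical helper name, left disabled,
 * not part of the library): both inverses compute x^-1 mod n via the safegcd-based
 * secp256k1_modinv64 machinery, the _var variant trading constant-time execution
 * for speed. A quick self-check is that x * x^-1 reduces to one: */
#if 0
static int secp256k1_scalar_inverse_example(const secp256k1_scalar *x) {
    secp256k1_scalar xinv, product;
    secp256k1_scalar_inverse_var(&xinv, x);   /* or secp256k1_scalar_inverse for constant time */
    secp256k1_scalar_mul(&product, x, &xinv);
    return secp256k1_scalar_is_one(&product); /* holds for any nonzero x */
}
#endif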
959 
960 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
961  secp256k1_scalar_verify(a);
962 
963  return !(a->d[0] & 1);
964 }
965 
966 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */