#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)

static SECP256K1_INLINE unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    /* ... */
    return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}

static SECP256K1_INLINE unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    /* ... */
    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        /* The requested window straddles a limb boundary: combine the high
         * bits of one limb with the low bits of the next. */
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
    }
}
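/*
 * Illustration (standalone demo, not library code): the same cross-limb
 * extraction logic as secp256k1_scalar_get_bits_var, applied to a plain
 * 4x64 limb array with arbitrary test values.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_bits(const uint64_t d[4], unsigned int offset, unsigned int count) {
    if ((offset + count - 1) >> 6 == offset >> 6) {
        return (d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
    }
    return ((d[offset >> 6] >> (offset & 0x3F)) |
            (d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) &
           ((((uint64_t)1) << count) - 1);
}

int main(void) {
    const uint64_t d[4] = {0, 0xFF, 0, 0};  /* bit pattern 0xFF at bits 64..71 */
    /* An 8-bit window starting at bit 60 straddles limbs 0 and 1. */
    printf("%llx\n", (unsigned long long)get_bits(d, 60, 8));  /* prints f0 */
    return 0;
}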
/* ... */

static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    /* ... */
    volatile int vflag = flag;
    /* ... */
    bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes the add below a no-op */
    /* ... */
}
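/*
 * Illustration (standalone demo, not library code): (flag - 1) is all-ones
 * when flag == 0 and zero when flag == 1, so ANDing with 0x100 pushes the
 * bit index out of the 0..255 range exactly in the "do nothing" case.
 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int flag;
    for (flag = 0; flag <= 1; flag++) {
        uint32_t bit = 5;
        bit += ((uint32_t)flag - 1) & 0x100;
        printf("flag=%d -> bit=%u (limb index %u)\n", flag, bit, bit >> 6);
    }
    return 0;  /* flag=0 gives bit=261 (limb 4, out of range); flag=1 keeps bit=5 */
}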
/* ... */

static SECP256K1_INLINE int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    /* ... */
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

/* ... */

static SECP256K1_INLINE int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    /* ... */
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}
/* ... */

static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    volatile int vflag = flag;
    uint64_t mask = -vflag;
    /* ... */
    return 2 * (mask == 0) - 1;
}
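/*
 * Illustration (standalone demo, not library code): the same all-ones/all-zeros
 * mask idea gives branch-free conditional negation of a single 64-bit value.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cond_negate_u64(uint64_t x, int flag) {
    uint64_t mask = -(uint64_t)flag;  /* all ones if flag, else zero */
    return (x ^ mask) - mask;         /* two's complement negation when mask is set */
}

int main(void) {
    printf("%lld\n", (long long)cond_negate_u64(42, 1));  /* -42 */
    printf("%lld\n", (long long)cond_negate_u64(42, 0));  /*  42 */
    return 0;
}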
/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* overflow is handled on the next line */ \
    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);                  /* overflow is handled on the next line */ \
    over = (c0 < (a)); \
    c1 += over;                 /* overflow is handled on the next line */ \
    c2 += (c1 < over);          /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, and c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a);                 /* overflow is handled on the next line */ \
    c1 += (c0 < (a));          /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is assumed to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}

static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;

    __asm__ __volatile__(
    /* Preload. */
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    /* ... */
    "movq 0(%%rsi), %%r8\n"
    /* ... */
    "xorq %%r10, %%r10\n"
    /* ... */
    "addq 8(%%rsi), %%r9\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "addq 16(%%rsi), %%r10\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    "addq %%r11, %%r10\n"
    /* ... */
    "xorq %%r10, %%r10\n"
    /* ... */
    "addq 24(%%rsi), %%r8\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "addq %%r14, %%r10\n"
    /* ... */
    : "=&g"(m0), "=&g"(m1), "=&g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    : /* ... */
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    /* ... */
    "xorq %%r10, %%r10\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    "addq %%r11, %%r10\n"
    /* ... */
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    : /* ... */
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    /* ... */
    "movq %%rax, 0(%q6)\n"
    /* ... */
    "movq %%r8, 8(%q6)\n"
    /* ... */
    "movq %%r9, 16(%q6)\n"
    /* ... */
    "movq %%r8, 24(%q6)\n"
    /* ... */
    : /* ... */
    : /* ... */
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
    uint64_t c, c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    uint64_t p0, p1, p2, p3;
    uint32_t p4;

    /* Reduce 512 bits into 385. */
    /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    /* ... */

    /* Reduce 385 bits into 258. */
    /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m4, SECP256K1_N_C_0);
    extract_fast(p0);
    /* ... */

    /* Reduce 258 bits into 256. */
    /* ... */
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
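/*
 * Illustration (standalone demo, not library code): reduce_512 folds the high
 * limbs in because 2^256 is congruent to 2^256 - n = SECP256K1_N_C modulo the
 * order n. The same idea at 64-bit scale, with an arbitrary demo modulus;
 * assumes a compiler providing unsigned __int128 (as the library's int128
 * module does on 64-bit targets).
 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t n = 0xFFFFFFFFFFFFFFC5ULL;  /* arbitrary modulus > 2^63 */
    const uint64_t c = -n;                     /* 2^64 - n == 2^64 mod n */
    uint64_t hi = 0x123456789ABCDEF0ULL, lo = 0xFEDCBA9876543210ULL;
    /* hi*2^64 + lo == hi*c + lo (mod n): fold the high limb down. */
    unsigned __int128 t = (unsigned __int128)hi * c + lo;
    while (t >= n) t -= n;                     /* t is small now; finish naively */
    printf("%llx\n", (unsigned long long)(uint64_t)t);
    return 0;
}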
static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    const uint64_t *pb = b->d;
    __asm__ __volatile__(
    /* Preload */
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    /* (rax,rdx) = a0 * b0 */
    "movq %%r15, %%rax\n"
    /* ... */
    /* Extract l0 */
    "movq %%rax, 0(%%rsi)\n"
    /* ... */
    "xorq %%r10, %%r10\n"
    /* ... */
    "movq %%r15, %%rax\n"
    /* ... */
    "movq %%rbx, %%rax\n"
    /* ... */
    /* Extract l1 */
    "movq %%r8, 8(%%rsi)\n"
    /* ... */
    "movq %%r15, %%rax\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "movq %%rbx, %%rax\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "movq %%rcx, %%rax\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    /* Extract l2 */
    "movq %%r9, 16(%%rsi)\n"
    /* ... */
    "movq %%r15, %%rax\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    "movq 24(%%rdi), %%r15\n"
    /* ... */
    "movq %%rbx, %%rax\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    "movq %%rcx, %%rax\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    "movq %%r15, %%rax\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    /* Extract l3 */
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    /* ... */
    "movq %%rbx, %%rax\n"
    /* ... */
    "movq %%rcx, %%rax\n"
    /* ... */
    "movq %%r15, %%rax\n"
    /* ... */
    /* Extract l4 */
    "movq %%r8, 32(%%rsi)\n"
    /* ... */
    "movq %%rcx, %%rax\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    "movq %%r15, %%rax\n"
    /* ... */
    "adcq %%rdx, %%r10\n"
    /* ... */
    /* Extract l5 */
    "movq %%r9, 40(%%rsi)\n"
    /* ... */
    "movq %%r15, %%rax\n"
    /* ... */
    "addq %%rax, %%r10\n"
    /* ... */
    /* Extract l6 */
    "movq %%r10, 48(%%rsi)\n"
    /* ... */
    /* Extract l7 */
    "movq %%r8, 56(%%rsi)\n"
    : /* ... */
    : /* ... */
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * b[0..3]. */
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l[0]);
    /* ... */
#endif
}
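/*
 * Illustration (standalone demo, not library code): the portable path is a
 * column-wise schoolbook multiply driven by the (c0,c1,c2) accumulator and the
 * muladd/extract macros above. The same pattern at half scale, using 32-bit
 * limbs with a 64-bit accumulator, on arbitrary demo values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t a[2] = {0xFFFFFFFFu, 0x12345678u};  /* a[0] is least significant */
    uint32_t b[2] = {0x9ABCDEF0u, 0xFFFFFFFFu};
    uint32_t l[4];
    uint64_t acc;       /* plays the role of (c0,c1) */
    uint32_t c2 = 0;    /* third accumulator limb */
    uint64_t p;

    acc = (uint64_t)a[0] * b[0];                             /* muladd_fast(a0, b0) */
    l[0] = (uint32_t)acc; acc >>= 32;                        /* extract_fast(l[0]) */

    p = (uint64_t)a[0] * b[1]; acc += p; c2 += (acc < p);    /* muladd(a0, b1) */
    p = (uint64_t)a[1] * b[0]; acc += p; c2 += (acc < p);    /* muladd(a1, b0) */
    l[1] = (uint32_t)acc;                                    /* extract(l[1]) */
    acc = (acc >> 32) | ((uint64_t)c2 << 32); c2 = 0;

    acc += (uint64_t)a[1] * b[1];                            /* muladd_fast(a1, b1) */
    l[2] = (uint32_t)acc; acc >>= 32;
    l[3] = (uint32_t)acc;

    printf("%08x%08x%08x%08x\n", l[3], l[2], l[1], l[0]);
    return 0;
}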
/* ... */

static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    /* ... */
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
    r->d[3] = (r->d[3] >> n);
    /* ... */
    return ret;
}
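/*
 * Illustration (standalone demo, not library code): the same cross-limb right
 * shift on a plain 4x64 array, returning the bits shifted out. Callers can
 * peel off a few low bits at a time, least significant first.
 */
#include <stdint.h>
#include <stdio.h>

static int shr_int(uint64_t d[4], int n) {  /* requires 0 < n < 16 */
    int ret = d[0] & ((1 << n) - 1);
    d[0] = (d[0] >> n) + (d[1] << (64 - n));
    d[1] = (d[1] >> n) + (d[2] << (64 - n));
    d[2] = (d[2] >> n) + (d[3] << (64 - n));
    d[3] = (d[3] >> n);
    return ret;
}

int main(void) {
    uint64_t d[4] = {0x123, 0, 0, 0};
    int n0 = shr_int(d, 4);  /* 0x3 */
    int n1 = shr_int(d, 4);  /* 0x2 */
    int n2 = shr_int(d, 4);  /* 0x1 */
    printf("%x %x %x\n", n0, n1, n2);
    return 0;
}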
static SECP256K1_INLINE int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    /* ... */
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
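/*
 * Illustration (standalone demo, not library code): OR-folding the XORs of
 * all limbs yields zero iff every limb matches, without a data-dependent
 * branch on where the operands first differ.
 */
#include <stdint.h>
#include <stdio.h>

static int eq4(const uint64_t a[4], const uint64_t b[4]) {
    return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3])) == 0;
}

int main(void) {
    uint64_t x[4] = {1, 2, 3, 4}, y[4] = {1, 2, 3, 4}, z[4] = {1, 2, 3, 5};
    printf("%d %d\n", eq4(x, y), eq4(x, z));  /* 1 0 */
    return 0;
}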
/* ... */

static SECP256K1_INLINE void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    /* ... */
    VERIFY_CHECK(shift >= 256);
    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    /* Round to nearest by conditionally adding the bit just below the cut. */
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
    /* ... */
}
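/*
 * Illustration (standalone demo, not library code): the conditionals above
 * implement a variable right shift across the 8x64 product while guarding
 * against reads past l[7]. The same limb-indexed selection on a plain array,
 * with an arbitrary test value.
 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t l[8] = {0, 0, 0, 0, 0xDEADBEEFULL, 0, 0, 0};  /* value = 0xDEADBEEF * 2^256 */
    unsigned int shift = 260;                              /* must satisfy 256 <= shift */
    unsigned int shiftlimbs = shift >> 6;
    unsigned int shiftlow = shift & 0x3F;
    unsigned int shifthigh = 64 - shiftlow;
    uint64_t r0 = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    printf("%llx\n", (unsigned long long)r0);  /* 0xdeadbeef >> 4 = 0xdeadbee */
    return 0;
}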
/* ... */

static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint64_t mask0, mask1;
    volatile int vflag = flag;
    /* ... */
    mask0 = vflag + ~((uint64_t)0);
    mask1 = ~mask0;
    r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
    r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
    r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
    r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
    /* ... */
}
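/*
 * Illustration (standalone demo, not library code): the complementary masks
 * give a constant-time select. mask0 = flag - 1 is zero when flag == 1 and
 * all ones when flag == 0, so exactly one operand survives the AND/OR.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cmov_u64(uint64_t r, uint64_t a, int flag) {
    uint64_t mask0 = (uint64_t)flag + ~((uint64_t)0);  /* 0 if flag, all ones if not */
    uint64_t mask1 = ~mask0;
    return (r & mask0) | (a & mask1);
}

int main(void) {
    printf("%llu %llu\n",
           (unsigned long long)cmov_u64(7, 9, 0),   /* 7 */
           (unsigned long long)cmov_u64(7, 9, 1));  /* 9 */
    return 0;
}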
/* ... */

static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
    const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
    /* ... */
    r->d[0] = a0 | a1 << 62;
    r->d[1] = a1 >> 2 | a2 << 60;
    r->d[2] = a2 >> 4 | a3 << 58;
    r->d[3] = a3 >> 6 | a4 << 56;
    /* ... */
}

static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
    /* ... */
    r->v[0] = a0 & M62;
    r->v[1] = (a0 >> 62 | a1 << 2) & M62;
    r->v[2] = (a1 >> 60 | a2 << 4) & M62;
    r->v[3] = (a2 >> 58 | a3 << 6) & M62;
    r->v[4] = a3 >> 56;
}
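/*
 * Illustration (standalone demo, not library code): round-trip check of the
 * 4x64 <-> 5x62 repacking formulas on plain arrays, using an arbitrary test
 * value. 256 = 4*62 + 8, so the fifth limb holds the top 8 bits.
 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t M62 = UINT64_MAX >> 2;
    uint64_t d[4] = {0x1111111111111111ULL, 0x2222222222222222ULL,
                     0x3333333333333333ULL, 0x4444444444444444ULL};
    uint64_t v[5], e[4];

    /* to_signed62: split 256 bits into five 62-bit limbs. */
    v[0] = d[0] & M62;
    v[1] = (d[0] >> 62 | d[1] << 2) & M62;
    v[2] = (d[1] >> 60 | d[2] << 4) & M62;
    v[3] = (d[2] >> 58 | d[3] << 6) & M62;
    v[4] = d[3] >> 56;

    /* from_signed62: pack them back into four 64-bit limbs. */
    e[0] = v[0] | v[1] << 62;
    e[1] = v[1] >> 2 | v[2] << 60;
    e[2] = v[2] >> 4 | v[3] << 58;
    e[3] = v[3] >> 6 | v[4] << 56;

    printf("round trip %s\n",
           (e[0] == d[0] && e[1] == d[1] && e[2] == d[2] && e[3] == d[3]) ? "ok" : "FAILED");
    return 0;
}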
/* ... */

static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
    {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
    /* ... */
};
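/*
 * Sanity check on the initializer above (a worked recomposition, not library
 * code): the five signed-62 limbs encode the group order n with place values
 * 2^(62*i):
 *
 *   n = 256*2^248 + 0*2^186 - 0x15*2^124
 *         + 0x2ABB739ABD2280EE*2^62 + 0x3FD25E8CD0364141
 *
 * which matches the SECP256K1_N_* limbs defined at the top of this file.
 */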
/* ... */

static SECP256K1_INLINE int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    /* ... */
    return !(a->d[0] & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */