14 #ifndef SECP256K1_FIELD_INNER5X52_IMPL_H
15 #define SECP256K1_FIELD_INNER5X52_IMPL_H
/* Fragment of secp256k1_fe_mul_inner: hand-written x86-64 assembly for
 * field multiplication in the 5x52-bit-limb representation of the
 * secp256k1 field (p = 2^256 - 0x1000003d1 -- presumably; confirm
 * against upstream libsecp256k1).
 *
 * NOTE(review): this extract is INCOMPLETE. The leading numbers on each
 * line are the original file's line numbers and they jump (29, 31, 38,
 * 43, ...), so most of the multiply/accumulate instructions, the function
 * header, and the opening __asm__ __volatile__( are missing from this
 * view. Comments below describe only what the visible lines show. */
29 uint64_t tmp1, tmp2, tmp3;   /* stack scratch slots exposed to the asm via "=&m" outputs below */
/* Load the five limbs of operand a (%rsi, tied via the "+S"(a) constraint)
 * into r10..r14. */
31 "movq 0(%%rsi),%%r10\n"
32 "movq 8(%%rsi),%%r11\n"
33 "movq 16(%%rsi),%%r12\n"
34 "movq 24(%%rsi),%%r13\n"
35 "movq 32(%%rsi),%%r14\n"
/* Fetch each limb of the second operand from %rbx into %rax -- presumably
 * b is held in %rbx (its input constraint is not visible in this extract).
 * The mulq/add/adc sequences between these loads are missing here. */
38 "movq 0(%%rbx),%%rax\n"
43 "movq 8(%%rbx),%%rax\n"
48 "movq 16(%%rbx),%%rax\n"
53 "movq 24(%%rbx),%%rax\n"
58 "movq 32(%%rbx),%%rax\n"
/* 0xfffffffffffff = 2^52 - 1: the 52-bit limb mask. */
63 "movq $0xfffffffffffff,%%rdx\n"
/* 0x1000003d10: reduction multiplier -- presumably 0x1000003d1 << 4,
 * i.e. (2^256 mod p) scaled for the 5x52 limb layout; TODO confirm. */
65 "movq $0x1000003d10,%%rdx\n"
/* shrdq $52: shift the 128-bit pair r9:r8 right by 52 into r8,
 * extracting the carry above a 52-bit limb boundary. */
70 "shrdq $52,%%r9,%%r8\n"
73 "movq $0xfffffffffffff,%%rdx\n"
/* Same carry extraction on the r15:rcx accumulator pair. */
77 "shrdq $52,%%r15,%%rcx\n"
/* Next partial-product pass over b's limbs (intervening math missing). */
80 "movq 0(%%rbx),%%rax\n"
85 "movq 8(%%rbx),%%rax\n"
90 "movq 16(%%rbx),%%rax\n"
95 "movq 24(%%rbx),%%rax\n"
100 "movq 32(%%rbx),%%rax\n"
106 "movq $0x1000003d10,%%rdx\n"
112 "movq $0xfffffffffffff,%%rdx\n"
115 "shrdq $52,%%r15,%%rcx\n"
/* 0xffffffffffff = 2^48 - 1: presumably the mask for the 48-bit top
 * limb (5*52 = 260 = 256 + 4) -- TODO confirm. */
122 "movq $0xffffffffffff,%%rax\n"
126 "movq 0(%%rbx),%%rax\n"
131 "movq 8(%%rbx),%%rax\n"
136 "movq 16(%%rbx),%%rax\n"
141 "movq 24(%%rbx),%%rax\n"
146 "movq 32(%%rbx),%%rax\n"
152 "movq $0xfffffffffffff,%%rdx\n"
155 "shrdq $52,%%r15,%%rcx\n"
/* 0x1000003d1: presumably 2^256 mod p itself (unshifted form). */
162 "movq $0x1000003d1,%%rax\n"
168 "movq $0xfffffffffffff,%%rdx\n"
/* Store result limb 0 to r (%rdi). */
170 "movq %%rax,0(%%rdi)\n"
172 "shrdq $52,%%r9,%%r8\n"
175 "movq 0(%%rbx),%%rax\n"
180 "movq 8(%%rbx),%%rax\n"
185 "movq 16(%%rbx),%%rax\n"
190 "movq 24(%%rbx),%%rax\n"
195 "movq 32(%%rbx),%%rax\n"
201 "movq $0xfffffffffffff,%%rdx\n"
203 "movq $0x1000003d10,%%rdx\n"
208 "shrdq $52,%%r15,%%rcx\n"
212 "movq $0xfffffffffffff,%%rdx\n"
/* Store result limb 1. */
214 "movq %%rax,8(%%rdi)\n"
216 "shrdq $52,%%r9,%%r8\n"
219 "movq 0(%%rbx),%%rax\n"
224 "movq 8(%%rbx),%%rax\n"
229 "movq 16(%%rbx),%%rax\n"
237 "movq 24(%%rbx),%%rax\n"
242 "movq 32(%%rbx),%%rax\n"
248 "movq $0xfffffffffffff,%%rdx\n"
250 "movq $0x1000003d10,%%rdx\n"
255 "shrdq $52,%%r15,%%rcx\n"
258 "movq $0xfffffffffffff,%%rdx\n"
/* Store result limb 2. */
260 "movq %%rax,16(%%rdi)\n"
262 "shrdq $52,%%r9,%%r8\n"
268 "movq $0x1000003d10,%%rdx\n"
274 "movq $0xfffffffffffff,%%rdx\n"
/* Store result limb 3. */
276 "movq %%rax,24(%%rdi)\n"
278 "shrdq $52,%%r9,%%r8\n"
/* Store result limb 4 (top, 48-bit). */
282 "movq %%r8,32(%%rdi)\n"
/* Output operands. */
283 :
"+S"(a),        /* a pinned to %rsi, read/write */
"=&m"(tmp1),    /* early-clobber memory scratch slots */
"=&m"(tmp2),
"=&m"(tmp3)
/* NOTE(review): the input-operand section (presumably binding b and r to
 * %rbx/%rdi) is missing from this extract; the list below is the clobber
 * list. */
285 :
"%rax",
"%rcx",
"%rdx",
"%r8",
"%r9",
"%r10",
"%r11",
"%r12",
"%r13",
"%r14",
"%r15",
"cc",       /* condition codes clobbered by add/adc/shrd */
"memory"    /* result store through %rdi */
/* Fragment of secp256k1_fe_sqr_inner: hand-written x86-64 assembly for
 * field squaring in the same 5x52-limb representation.
 *
 * NOTE(review): INCOMPLETE extract -- original line numbers jump (299,
 * 300, ..., 309, 314, 325, ...), so most of the multiply/accumulate
 * instructions and the input-operand section are missing. Comments
 * describe only the visible lines. Unlike the mul routine, %rbx here is
 * a scratch accumulator (it appears in the clobber list below), since
 * squaring has only one input operand. */
299 uint64_t tmp1, tmp2, tmp3;   /* stack scratch slots for the asm ("=&m" outputs) */
300 __asm__ __volatile__(
/* Load the five limbs of a (%rsi via "+S"(a)) into r10..r14. */
301 "movq 0(%%rsi),%%r10\n"
302 "movq 8(%%rsi),%%r11\n"
303 "movq 16(%%rsi),%%r12\n"
304 "movq 24(%%rsi),%%r13\n"
305 "movq 32(%%rsi),%%r14\n"
/* Keep the 52-bit limb mask (2^52 - 1) resident in r15. */
306 "movq $0xfffffffffffff,%%r15\n"
/* leaq (r,r,1) computes 2*r: doubled limbs, presumably for the cross
 * products a_i*a_j (i != j) which occur twice when squaring. */
309 "leaq (%%r10,%%r10,1),%%rax\n"
314 "leaq (%%r11,%%r11,1),%%rax\n"
/* 0x1000003d10: reduction multiplier (presumably 0x1000003d1 << 4). */
325 "movq $0x1000003d10,%%rdx\n"
/* Extract the carry above 52 bits from the r9:r8 pair. */
330 "shrdq $52,%%r9,%%r8\n"
/* Same extraction on the rcx:rbx accumulator pair. */
336 "shrdq $52,%%rcx,%%rbx\n"
346 "leaq (%%r11,%%r11,1),%%rax\n"
357 "movq $0x1000003d10,%%rdx\n"
365 "shrdq $52,%%rcx,%%rbx\n"
/* 0xffffffffffff = 2^48 - 1: presumably the 48-bit top-limb mask. */
372 "movq $0xffffffffffff,%%rax\n"
386 "leaq (%%r12,%%r12,1),%%rax\n"
394 "shrdq $52,%%rcx,%%rbx\n"
/* 0x1000003d1: presumably 2^256 mod p (unshifted reduction constant). */
401 "movq $0x1000003d1,%%rax\n"
/* Store result limb 0 to r (%rdi). */
408 "movq %%rax,0(%%rdi)\n"
410 "shrdq $52,%%r9,%%r8\n"
432 "movq $0x1000003d10,%%rdx\n"
437 "shrdq $52,%%rcx,%%rbx\n"
/* Store result limb 1. */
442 "movq %%rax,8(%%rdi)\n"
444 "shrdq $52,%%r9,%%r8\n"
467 "movq $0x1000003d10,%%rdx\n"
472 "shrdq $52,%%rcx,%%rbx\n"
/* Store result limb 2. */
476 "movq %%rax,16(%%rdi)\n"
478 "shrdq $52,%%r9,%%r8\n"
484 "movq $0x1000003d10,%%rdx\n"
/* Store result limb 3. */
491 "movq %%rax,24(%%rdi)\n"
493 "shrdq $52,%%r9,%%r8\n"
/* Store result limb 4 (top, 48-bit). */
497 "movq %%r8,32(%%rdi)\n"
/* Output operands. */
498 :
"+S"(a),        /* a pinned to %rsi, read/write */
"=&m"(tmp1),    /* early-clobber memory scratch slots */
"=&m"(tmp2),
"=&m"(tmp3)
/* NOTE(review): the input-operand section (presumably binding r to %rdi)
 * is missing from this extract; the list below is the clobber list. */
500 :
"%rax",
"%rbx",     /* clobbered: used as an accumulator, not an input pointer */
"%rcx",
"%rdx",
"%r8",
"%r9",
"%r10",
"%r11",
"%r12",
"%r13",
"%r14",
"%r15",
"cc",       /* condition codes clobbered by add/adc/shrd */
"memory"    /* result store through %rdi */
/* NOTE(review): detached trailing lines from the extraction. The two
 * prototypes below are the entry points whose asm bodies appear (in
 * fragments) earlier in this file; in the upstream source they precede
 * those bodies. "Changelog:" looks like unrelated prose residue, and the
 * SECP256K1_RESTRICT define is presumably a fallback for compilers
 * without restrict support -- TODO confirm against upstream. All four
 * lines reproduced unchanged. */
static SECP256K1_INLINE void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t *SECP256K1_RESTRICT b)
Changelog:
static SECP256K1_INLINE void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a)
#define SECP256K1_RESTRICT