/* assembly to compute poly1305 */
#include "crypto_asm_hidden.h"
// linker define poly1305_maax_g1
// linker use mask2
// linker use mask2c
// linker use p0
// linker use p1
// linker use p2
#define mask2 CRYPTO_SHARED_NAMESPACE(mask2)
#define mask2c CRYPTO_SHARED_NAMESPACE(mask2c)
#define p0 CRYPTO_SHARED_NAMESPACE(p0)
#define p1 CRYPTO_SHARED_NAMESPACE(p1)
#define p2 CRYPTO_SHARED_NAMESPACE(p2)
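/* constants resolved by the linker; from their use below they are
 * expected to be mask2 = 0x3 (the two bits of a limb below 2^130),
 * mask2c = ~mask2, and (p2 : p1 : p0) = 2^130 - 5
 */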
.p2align 5
ASM_HIDDEN _CRYPTO_SHARED_NAMESPACE(poly1305_maax_g1)
ASM_HIDDEN CRYPTO_SHARED_NAMESPACE(poly1305_maax_g1)
.global _CRYPTO_SHARED_NAMESPACE(poly1305_maax_g1)
.global CRYPTO_SHARED_NAMESPACE(poly1305_maax_g1)
_CRYPTO_SHARED_NAMESPACE(poly1305_maax_g1):
CRYPTO_SHARED_NAMESPACE(poly1305_maax_g1):
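/* arguments (SysV AMD64 ABI): %rdi = output, %rsi = message,
 * %rdx = 32-byte key, %rcx = number of 16-byte blocks;
 * %r8 and %r9 appear to describe the length of the final partial
 * block (saved below and examined at .L4 and .L9).
 * The prologue aligns the stack to 32 bytes, reserves 128 bytes,
 * and saves the callee-saved registers and the output pointer.
 */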
movq %rsp,%r11
andq $-32,%rsp
subq $128,%rsp
movq %r11,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
movq %rbx,40(%rsp)
movq %rbp,48(%rsp)
movq %rdi,56(%rsp)
movq %r8,64(%rsp)
movq %r9,72(%rsp)
/* store last 16 bytes of the key */
movq 16(%rdx),%r14
movq 24(%rdx),%r15
movq %r14,80(%rsp)
movq %r15,88(%rsp)
/* first 16 bytes of the key = (r15 : r14) */
movq 0(%rdx),%r14
movq 8(%rdx),%r15
/* initialize a quad-word on the stack with 0 */
movq $0,96(%rsp)
/* if the message has a single block */
cmpq $1,%rcx
je .L9
/* else if the message has more than a single block */
/* first message block = (rax : r13) */
movq 0(%rsi),%r13
movq 8(%rsi),%rax
/* else loop around and multiply the 129-bit (3-limb)
 * message block with the 128-bit (2-limb) key;
 * the 129th bit (the padding bit) is placed in %rdi before proceeding
 */
movq $1,%rdi
.L1:
/* integer multiplication */
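/* 3-limb x 2-limb schoolbook product with mulx/adcx/adox
 * (two independent carry chains):
 * (rdi : rax : r13) * (r15 : r14) = (r12 : r11 : r10 : rax : r13)
 */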
xorq %r11,%r11
movq %r13,%rdx
mulx %r14,%r13,%r9
mulx %r15,%rbx,%r10
adcx %rbx,%r9
adcx %r11,%r10
xorq %r12,%r12
movq %rax,%rdx
mulx %r14,%rax,%rbp
adcx %r9,%rax
adox %rbp,%r10
mulx %r15,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
adcx %r12,%r11
movq %rdi,%rdx
xorq %rdi,%rdi
mulx %r14,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
mulx %r15,%rbx,%rbp
adcx %rbx,%r11
adox %rbp,%r12
adcx %rdi,%r12
/* reduction on the integer product (r12 : r11 : r10 : rax : r13) */
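/* split at bit 130: low = product mod 2^130 = (r10 & mask2 : rax : r13),
 * high = product >> 130; since 2^130 = 5 (mod 2^130 - 5), add
 * 4*high (the mask2c-masked limbs) and then high (the limbs
 * shifted right by 2), i.e. 5*high in total
 */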
movq %r10,%rdi
andq mask2(%rip),%rdi
andq mask2c(%rip),%r10
addq %r10,%r13
adcq %r11,%rax
adcq %r12,%rdi
shrd $2,%r11,%r10
shrd $2,%r12,%r11
shrq $2,%r12
addq %r10,%r13
adcq %r11,%rax
adcq %r12,%rdi
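/* fold the bits at and above 2^130 once more:
 * h = (h mod 2^130) + 5*(h >> 130)
 */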
movq %rdi,%r11
andq mask2(%rip),%rdi
shrq $2,%r11
imul $5,%r11,%r11
addq %r11,%r13
adcq $0,%rax
adcq $0,%rdi
addq $16,%rsi
subq $1,%rcx
cmpq $2,%rcx
jg .L2
je .L3
jl .L4
.L2:
/* add the next message block and loop */
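/* the trailing adcq $1 adds the padding bit 2^128 of the full block */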
addq 0(%rsi),%r13
adcq 8(%rsi),%rax
adcq $1,%rdi
jmp .L1
.L3:
/* add the second-to-last block and proceed */
addq 0(%rsi),%r13
adcq 8(%rsi),%rax
adcq $1,%rdi
/* integer multiplication */
xorq %r11,%r11
movq %r13,%rdx
mulx %r14,%r13,%r9
mulx %r15,%rbx,%r10
adcx %rbx,%r9
adcx %r11,%r10
xorq %r12,%r12
movq %rax,%rdx
mulx %r14,%rax,%rbp
adcx %r9,%rax
adox %rbp,%r10
mulx %r15,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
adcx %r12,%r11
movq %rdi,%rdx
xorq %rdi,%rdi
mulx %r14,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
mulx %r15,%rbx,%rbp
adcx %rbx,%r11
adox %rbp,%r12
adcx %rdi,%r12
/* reduction on the integer product (r12 : r11 : r10 : rax : r13) */
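/* same splitting at bit 130 as in the loop: add 4*high and then high */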
movq %r10,%rdi
andq mask2(%rip),%rdi
andq mask2c(%rip),%r10
addq %r10,%r13
adcq %r11,%rax
adcq %r12,%rdi
shrd $2,%r11,%r10
shrd $2,%r12,%r11
shrq $2,%r12
addq %r10,%r13
adcq %r11,%rax
adcq %r12,%rdi
addq $16,%rsi
subq $1,%rcx
.L4:
/* process the last block */
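/* 64(%rsp) holds the saved %r8; from the comparisons below it
 * appears to be the bit length of the final partial block
 * (0 when that block is a full 16 bytes)
 */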
movq %rdi,%r12
/* if the last block is full */
cmpq $0,64(%rsp)
je .L7
/* if the last block has 8 bytes */
cmpq $64,64(%rsp)
je .L6
/* if the last block has 1 to 7 bytes */
jl .L5
/* else if the last block has 9 to 15 bytes */
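/* copy the bytes beyond the first 8 into the zeroed quad-word at
 * 96(%rsp) and set the padding bit just above them
 */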
/* first chunk of message block = (r8) */
movq 0(%rsi),%r8
addq $8,%rsi
movq $128,%rbx
subq 64(%rsp),%rbx
movq $64,%rcx
subq %rbx,%rcx
shrq $3,%rcx
leaq 96(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* second chunk of message block = (r9) */
movq 96(%rsp),%r9
movq $-1,%r11
movq %rbx,%rcx
shrq %cl,%r11
addq $1,%r11
orq %r11,%r9
movq $0,%r10
jmp .L8
.L5:
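/* 1-to-7-byte block: copy it into the zeroed quad-word at 96(%rsp)
 * and set the padding bit just above the copied bytes
 */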
movq 64(%rsp),%rcx
shrq $3,%rcx
leaq 96(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* first chunk of message block = (r8) */
movq 96(%rsp),%r8
movq $-1,%r11
movb $64,%cl
subb 64(%rsp),%cl
shrq %cl,%r11
addq $1,%r11
orq %r11,%r8
/* second chunk of message block = (r9) */
movq $0,%r9
movq $0,%r10
jmp .L8
.L6:
movq 0(%rsi),%r8
movq $1,%r9
movq $0,%r10
jmp .L8
.L7:
movq 0(%rsi),%r8
movq 8(%rsi),%r9
movq $1,%r10
.L8:
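/* h += last block; r10 = 1 supplies the 2^128 padding bit for a full
 * block, while for partial blocks the padding bit was already set in
 * r8 or r9 above and r10 = 0
 */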
movq %r12,%rdi
addq %r8,%r13
adcq %r9,%rax
adcq %r10,%rdi
/* integer multiplication */
xorq %r11,%r11
movq %r13,%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
adcx %rbx,%r9
adcx %r11,%r10
xorq %r12,%r12
movq %rax,%rdx
mulx %r14,%rbx,%rbp
adcx %rbx,%r9
adox %rbp,%r10
mulx %r15,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
adcx %r12,%r11
xorq %rax,%rax
movq %rdi,%rdx
mulx %r14,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
mulx %r15,%rbx,%rbp
adcx %rbx,%r11
adox %rbp,%r12
adcx %rax,%r12
/* reduction on the integer product (r12 : r11 : r10 : r9 : r8) */
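/* same splitting at bit 130 as in the loop: add 4*high and then high */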
movq %r10,%rbx
andq mask2(%rip),%r10
andq mask2c(%rip),%rbx
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
shrd $2,%r11,%rbx
shrd $2,%r12,%r11
shrq $2,%r12
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
jmp .L13
.L9:
/* if the single message block is full */
cmpq $0,64(%rsp)
je .L12
/* if the single message block has 1 to 7 bytes */
cmpq $8,72(%rsp)
jl .L10
/* if the single message block has 8 bytes */
je .L11
/* else if the single message block has 9 to 15 bytes */
/* first chunk of message block = (r13) */
movq 0(%rsi),%r13
addq $8,%rsi
movq $128,%rbx
subq 64(%rsp),%rbx
movq $64,%rcx
subq %rbx,%rcx
shrq $3,%rcx
leaq 96(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* second chunk of message block = (rax) */
movq 96(%rsp),%rax
movq $-1,%r11
movq %rbx,%rcx
shrq %cl,%r11
addq $1,%r11
orq %r11,%rax
/* integer multiplication */
xorq %r11,%r11
movq %r13,%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
adcx %rbx,%r9
adcx %r11,%r10
xorq %r12,%r12
movq %rax,%rdx
mulx %r14,%rbx,%rbp
adcx %rbx,%r9
adox %rbp,%r10
mulx %r15,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
adcx %r12,%r11
/* reduction on the integer product (r11 : r10 : r9 : r8) */
movq %r10,%rbx
andq mask2(%rip),%r10
andq mask2c(%rip),%rbx
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
shrd $2,%r11,%rbx
shrq $2,%r11
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
jmp .L13
.L10:
movq 64(%rsp),%rcx
shrq $3,%rcx
leaq 96(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* message block = (r13) */
movq 96(%rsp),%r13
movq $-1,%r11
movb $64,%cl
subb 64(%rsp),%cl
shrq %cl,%r11
addq $1,%r11
orq %r11,%r13
/* integer multiplication */
xorq %r11,%r11
movq %r13,%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
adcx %rbx,%r9
adcx %r11,%r10
jmp .L13
.L11:
/* integer multiplication */
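/* 8-byte single block: h = m0 + 2^64 (padding bit), so
 * h*r = m0*r + r*2^64; the adcx of r14/r15 below adds r one limb up
 */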
xorq %r11,%r11
movq 0(%rsi),%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
adcx %rbx,%r9
adcx %r11,%r10
xorq %rax,%rax
adcx %r14,%r9
adox %rax,%r10
adcx %r15,%r10
adox %rax,%r11
adcx %rax,%r11
/* reduction on the integer product (r11 : r10 : r9 : r8) */
movq %r10,%r13
andq mask2(%rip),%r10
andq mask2c(%rip),%r13
addq %r13,%r8
adcq %r11,%r9
adcq $0,%r10
shrd $2,%r11,%r13
shrq $2,%r11
addq %r13,%r8
adcq %r11,%r9
adcq $0,%r10
jmp .L13
.L12:
/* integer multiplication */
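/* full 16-byte single block: h = m + 2^128 (padding bit), so
 * h*r = m*r + r*2^128; the adcx of r14/r15 below adds r two limbs up
 */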
xorq %r11,%r11
movq 0(%rsi),%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
adcx %rbx,%r9
adcx %r11,%r10
xorq %r12,%r12
movq 8(%rsi),%rdx
mulx %r14,%rbx,%rbp
adcx %rbx,%r9
adox %rbp,%r10
mulx %r15,%rbx,%rbp
adcx %rbx,%r10
adox %rbp,%r11
adcx %r12,%r11
xorq %rax,%rax
adcx %r14,%r10
adox %rax,%r11
adcx %r15,%r11
adox %rax,%r12
adcx %rax,%r12
/* reduction on the integer product (r12 : r11 : r10 : r9 : r8) */
movq %r10,%rbx
andq mask2(%rip),%r10
andq mask2c(%rip),%rbx
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
shrd $2,%r11,%rbx
shrd $2,%r12,%r11
shrq $2,%r12
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
.L13:
/* final reduction on (r10 : r9 : r8) */
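/* h = (h mod 2^130) + 5*(h >> 130), using 2^130 = 5 (mod 2^130 - 5) */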
movq %r10,%r11
shrq $2,%r11
andq mask2(%rip),%r10
imul $5,%r11,%r11
addq %r11,%r8
adcq $0,%r9
adcq $0,%r10
/* freeze the reduced field element (r10 : r9 : r8) */
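/* subtract p = 2^130 - 5; bit 2 of the top limb (moved into CF by
 * the shlq $62) is set exactly when the subtraction borrowed, so
 * cmovc keeps the original value when h < p
 */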
movq %r8,%r11
movq %r9,%r12
movq %r10,%r13
subq p0(%rip),%r8
sbbq p1(%rip),%r9
sbbq p2(%rip),%r10
movq %r10,%rcx
shlq $62,%rcx
cmovc %r11,%r8
cmovc %r12,%r9
cmovc %r13,%r10
/* add last 16 bytes of the key */
addq 80(%rsp),%r8
adcq 88(%rsp),%r9
adcq $0,%r10
/* store the first 128 bits (16 bytes) of the result */
movq 56(%rsp),%rdi
movq %r8,0(%rdi)
movq %r9,8(%rdi)
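/* restore the callee-saved registers and the original stack pointer */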
movq 0(%rsp),%r11
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rbx
movq 48(%rsp),%rbp
movq %r11,%rsp
ret