/* assembly to compute poly1305 using precomputed key powers and
applying lazy reduction over a group of 8 field elements */
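/* the tag is ((m[1]*tau^n + m[2]*tau^(n-1) + ... + m[n]*tau)
   mod 2^130-5) + s mod 2^128, where tau is the clamped first half of
   the key, s the second half, and each m[i] a 16-byte message block
   with a 0x01 byte appended; the fe1305_* macros are defined in
   poly1305_asm.h, and the key pointer presumably holds the precomputed
   powers tau^1,...,tau^8 at a 24-byte stride */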
#include "crypto_asm_hidden.h"
// linker define poly1305_mxaa_g8
// linker use mask2
// linker use mask2c
// linker use p0
// linker use p1
// linker use p2
#define mask2 CRYPTO_SHARED_NAMESPACE(mask2)
#define mask2c CRYPTO_SHARED_NAMESPACE(mask2c)
#define p0 CRYPTO_SHARED_NAMESPACE(p0)
#define p1 CRYPTO_SHARED_NAMESPACE(p1)
#define p2 CRYPTO_SHARED_NAMESPACE(p2)
#include "poly1305_asm.h"
.p2align 5
ASM_HIDDEN _CRYPTO_SHARED_NAMESPACE(poly1305_mxaa_g8)
ASM_HIDDEN CRYPTO_SHARED_NAMESPACE(poly1305_mxaa_g8)
.global _CRYPTO_SHARED_NAMESPACE(poly1305_mxaa_g8)
.global CRYPTO_SHARED_NAMESPACE(poly1305_mxaa_g8)
_CRYPTO_SHARED_NAMESPACE(poly1305_mxaa_g8):
CRYPTO_SHARED_NAMESPACE(poly1305_mxaa_g8):
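/* arguments (inferred from their use below): rdi = 16-byte tag output,
   rsi = message, rdx = key material (precomputed powers of tau, with
   the last 16 key bytes at offset 192), rcx = number of 16-byte blocks
   counting the final, possibly partial, one, r8 = bit length of the
   final block (0 if it is full), r9 = its byte length; the prologue
   aligns the stack and spills the callee-saved registers and the
   arguments needed later */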
movq %rsp,%r11
andq $-32,%rsp
subq $128,%rsp
movq %r11,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
movq %rbx,40(%rsp)
movq %rbp,48(%rsp)
movq %rdi,56(%rsp)
movq %r8,64(%rsp)
movq %r9,72(%rsp)
/* store last 16 bytes of the key */
movq 192(%rdx),%r14
movq 200(%rdx),%r15
movq %r14,88(%rsp)
movq %r15,96(%rsp)
/* key = (r15 : r14) */
movq 0(%rdx),%r14
movq 8(%rdx),%r15
/* zero a quad-word on the stack; it stages the bytes of a partial final block */
movq $0,104(%rsp)
/* if the message has a single block */
cmpq $1,%rcx
je .L5
movq %rcx,80(%rsp)
movq %rdx,%rdi
movq %rdx,112(%rsp)
movq $0,%r8
movq $0,%r9
movq $0,%r10
movq $0,%r11
movq $0,%r12
cmpq $2,%rcx
je .LB2
cmpq $3,%rcx
je .LB3
cmpq $4,%rcx
je .LB4
cmpq $5,%rcx
je .LB5
cmpq $6,%rcx
je .LB6
cmpq $7,%rcx
je .LB7
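/* first pass: the accumulator is zero, so dispatch on the block count;
   .LBk multiplies the first k-1 blocks by tau^(k-1),...,tau and leaves
   the final block to .LB0; counts of 8 or more fall through to .LB8 */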
.LB8:
fe1305_mul_taun(0,144)
fe1305_add_product()
fe1305_mul_taun(16,120)
fe1305_add_product()
fe1305_mul_taun(32,96)
fe1305_add_product()
fe1305_mul_taun(48,72)
fe1305_add_product()
fe1305_mul_taun(64,48)
fe1305_add_product()
fe1305_mul_taun(80,24)
fe1305_add_product()
fe1305_mul_tau(96,0)
fe1305_add_product()
fe1305_reduce_5l()
fe1305_reduce_3l()
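/* lazy reduction: the seven products above were only accumulated by
   fe1305_add_product(); the sum is reduced just once per group of 8
   blocks by the two reduction macros above */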
addq $112,%rsi
movq 80(%rsp),%rcx
subq $8,%rcx
movq %rcx,80(%rsp)
/* if there are no blocks left before processing the last block */
cmpq $0,%rcx
je .LB0
/* at least one more block: add it to the accumulator (if it was the
   only one left, .LB1 folds it in with a single multiplication by tau) */
fe1305_add_msg_block(0)
addq $16,%rsi
cmpq $1,%rcx
je .LB1
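/* Horner step: the block just added rides with the accumulator, which
   is multiplied below by tau^k, k = min(remaining blocks, 8); offsets
   24,...,168 presumably index tau^2,...,tau^8 at a 24-byte stride */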
.LT2:
cmpq $2,%rcx
jg .LT3
/* if there are two more blocks before processing the last block */
fe1305_mul_taunr(24)
jmp .LB2
.LT3:
cmpq $3,%rcx
jg .LT4
/* if there are three more blocks before processing the last block */
fe1305_mul_taunr(48)
jmp .LB3
.LT4:
cmpq $4,%rcx
jg .LT5
/* if there are four more blocks before processing the last block */
fe1305_mul_taunr(72)
jmp .LB4
.LT5:
cmpq $5,%rcx
jg .LT6
/* if there are five more blocks before processing the last block */
fe1305_mul_taunr(96)
jmp .LB5
.LT6:
cmpq $6,%rcx
jg .LT7
/* if there are six more blocks before processing the last block */
fe1305_mul_taunr(120)
jmp .LB6
.LT7:
cmpq $7,%rcx
jg .LT8
/* if there are seven more blocks before processing the last block */
fe1305_mul_taunr(144)
jmp .LB7
.LT8:
/* if there are at least eight more blocks before processing the last block */
fe1305_mul_taunr(168)
jmp .LB8
.LB1:
fe1305_mul_taur(0)
fe1305_reduce_5l()
jmp .LB0
.LB2:
fe1305_mul_tau(0,0)
fe1305_add_product()
fe1305_reduce_5l()
addq $16,%rsi
jmp .LB0
.LB3:
fe1305_mul_taun(0,24)
fe1305_add_product()
fe1305_mul_tau(16,0)
fe1305_add_product()
fe1305_reduce_5l()
addq $32,%rsi
jmp .LB0
.LB4:
fe1305_mul_taun(0,48)
fe1305_add_product()
fe1305_mul_taun(16,24)
fe1305_add_product()
fe1305_mul_tau(32,0)
fe1305_add_product()
fe1305_reduce_5l()
addq $48,%rsi
jmp .LB0
.LB5:
fe1305_mul_taun(0,72)
fe1305_add_product()
fe1305_mul_taun(16,48)
fe1305_add_product()
fe1305_mul_taun(32,24)
fe1305_add_product()
fe1305_mul_tau(48,0)
fe1305_add_product()
fe1305_reduce_5l()
addq $64,%rsi
jmp .LB0
.LB6:
fe1305_mul_taun(0,96)
fe1305_add_product()
fe1305_mul_taun(16,72)
fe1305_add_product()
fe1305_mul_taun(32,48)
fe1305_add_product()
fe1305_mul_taun(48,24)
fe1305_add_product()
fe1305_mul_tau(64,0)
fe1305_add_product()
fe1305_reduce_5l()
addq $80,%rsi
jmp .LB0
.LB7:
fe1305_mul_taun(0,120)
fe1305_add_product()
fe1305_mul_taun(16,96)
fe1305_add_product()
fe1305_mul_taun(32,72)
fe1305_add_product()
fe1305_mul_taun(48,48)
fe1305_add_product()
fe1305_mul_taun(64,24)
fe1305_add_product()
fe1305_mul_tau(80,0)
fe1305_add_product()
fe1305_reduce_5l()
addq $96,%rsi
.LB0:
/* if the last block is full */
cmpq $0,64(%rsp)
je .L3
/* if the last block has 8 bytes */
cmpq $64,64(%rsp)
je .L2
/* if the last block has 1 to 7 bytes */
jl .L1
/* else if the last block has 9 to 15 bytes */
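/* copy the trailing len-8 bytes into the zeroed quad-word at 104(%rsp)
   with rep movsb, then set the 0x01 pad byte just above them: with
   b = 8*len bits, ((2^64-1) >> (128-b)) + 1 = 2^(b-64), the pad bit
   position within the second chunk */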
/* first chunk of message block = (r12) */
movq 0(%rsi),%r12
addq $8,%rsi
movq $128,%rbx
subq 64(%rsp),%rbx
movq $64,%rcx
subq %rbx,%rcx
shrq $3,%rcx
leaq 104(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* second chunk of message block = (r13) */
movq 104(%rsp),%r13
movq $-1,%r11
movq %rbx,%rcx
shrq %cl,%r11
addq $1,%r11
orq %r11,%r13
movq $0,%r14
jmp .L4
.L1:
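/* 1 to 7 bytes: same staging scheme; the pad bit 2^(8*len) lands in
   the first chunk and the second chunk stays zero */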
movq 64(%rsp),%rcx
shrq $3,%rcx
leaq 104(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* first chunk of message block = (r12) */
movq 104(%rsp),%r12
movq $-1,%r11
movb $64,%cl
subb 64(%rsp),%cl
shrq %cl,%r11
addq $1,%r11
orq %r11,%r12
/* second chunk of message block = (r13) */
movq $0,%r13
movq $0,%r14
jmp .L4
.L2:
movq 0(%rsi),%r12
movq $1,%r13
movq $0,%r14
jmp .L4
.L3:
movq 0(%rsi),%r12
movq 8(%rsi),%r13
movq $1,%r14
.L4:
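/* (r14 : r13 : r12) holds the padded final block; the constant 1 set
   above is the pad bit, at 2^64 for an 8-byte block or 2^128 for a
   full one; add the block to the accumulator, then one last
   multiplication by tau */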
addq %r12,%r8
adcq %r13,%r9
adcq %r14,%r10
movq 112(%rsp),%rdi
fe1305_mul_taur(0)
fe1305_reduce_5l()
jmp .L9
.L5:
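/* single-block message: compute (padded block * tau) mod 2^130-5
   directly with inline mulx arithmetic instead of the group macros */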
/* if the single message block is full */
cmpq $0,64(%rsp)
je .L8
/* if the single message block has 1 to 7 bytes */
cmpq $8,72(%rsp)
jl .L6
/* if the single message block has 8 bytes */
je .L7
/* else if the single message block has 9 to 15 bytes */
/* first chunk of message block = (r13) */
movq 0(%rsi),%r13
addq $8,%rsi
movq $128,%rbx
subq 64(%rsp),%rbx
movq $64,%rcx
subq %rbx,%rcx
shrq $3,%rcx
leaq 104(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* second chunk of message block = (rax) */
movq 104(%rsp),%rax
movq $-1,%r11
movq %rbx,%rcx
shrq %cl,%r11
addq $1,%r11
orq %r11,%rax
/* integer multiplication */
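/* schoolbook multiplication of the padded block (rax : r13) by the key
   (r15 : r14): mulx takes its implicit multiplicand in rdx and leaves
   the flags untouched, so the partial products can be accumulated with
   add/adc into (r11 : r10 : r9 : r8) */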
movq %r13,%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
addq %rbx,%r9
adcq $0,%r10
movq %rax,%rdx
mulx %r14,%rax,%rbx
mulx %r15,%rdx,%r11
addq %rdx,%rbx
adcq $0,%r11
addq %rax,%r9
adcq %rbx,%r10
adcq $0,%r11
/* reduction on the integer product (r11 : r10 : r9 : r8) */
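/* 2^130 = 5 (mod 2^130-5), so the product bits at and above 2^130 are
   folded back times 5: mask2/mask2c split r10 into bits 128-129 and
   the rest; the high part c = (r11 : r10 & mask2c) >> 2 is added once
   unshifted (= 4c) and once shifted right by 2 (= c) */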
xorq %r12,%r12
movq %r10,%rbx
andq mask2(%rip),%r10
andq mask2c(%rip),%rbx
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
shrd $2,%r11,%rbx
shrq $2,%r11
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
jmp .L9
.L6:
movq 64(%rsp),%rcx
shrq $3,%rcx
leaq 104(%rsp),%rdi
rep movsb (%rsi),(%rdi)
/* message block = (r13) */
movq 104(%rsp),%r13
movq $-1,%r11
movb $64,%cl
subb 64(%rsp),%cl
shrq %cl,%r11
addq $1,%r11
orq %r11,%r13
/* integer multiplication */
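/* padded short block (one limb) times (r15 : r14); the 3-limb product
   goes straight to the final reduction at .L9 */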
movq %r13,%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
addq %rbx,%r9
adcq $0,%r10
jmp .L9
.L7:
/* integer multiplication */
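/* (m + 2^64) * tau for the 8-byte block: the pad term tau * 2^64 is
   added in at the second limb; adcq %r11,%r11 captures the final carry
   into the zeroed r11 */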
movq 0(%rsi),%rdx
xorq %r11,%r11
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
addq %rbx,%r9
adcq %r11,%r10
addq %r14,%r9
adcq %r15,%r10
adcq %r11,%r11
/* reduction on the integer product (r11 : r10 : r9 : r8) */
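/* same 2^130 = 5 folding as in the 9-to-15-byte path above */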
movq %r10,%r13
andq mask2(%rip),%r10
andq mask2c(%rip),%r13
addq %r13,%r8
adcq %r11,%r9
adcq $0,%r10
shrd $2,%r11,%r13
shrq $2,%r11
addq %r13,%r8
adcq %r11,%r9
adcq $0,%r10
jmp .L9
.L8:
/* integer multiplication */
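/* (m + 2^128) * tau for a full 16-byte block: schoolbook product of
   the two message limbs with (r15 : r14), then the pad term tau * 2^128
   added at the third limb */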
movq 0(%rsi),%rdx
mulx %r14,%r8,%r9
mulx %r15,%rbx,%r10
addq %rbx,%r9
adcq $0,%r10
movq 8(%rsi),%rdx
mulx %r14,%rax,%rbx
mulx %r15,%rdx,%r11
addq %rdx,%rbx
adcq $0,%r11
addq %rax,%r9
adcq %rbx,%r10
adcq $0,%r11
xorq %r12,%r12
addq %r14,%r10
adcq %r15,%r11
adcq $0,%r12
/* reduction on the integer product (r12 : r11 : r10 : r9 : r8) */
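/* same 2^130 = 5 folding, extended to the carry limb r12 */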
movq %r10,%rbx
andq mask2(%rip),%r10
andq mask2c(%rip),%rbx
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
shrd $2,%r11,%rbx
shrd $2,%r12,%r11
shrq $2,%r12
addq %rbx,%r8
adcq %r11,%r9
adcq %r12,%r10
.L9:
/* final reduction on (r10 : r9 : r8) */
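/* r11 = h >> 130; since 2^130 = 5 (mod 2^130-5), 5*r11 is added back
   into the low 130 bits kept by mask2 */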
movq %r10,%r11
shrq $2,%r11
andq mask2(%rip),%r10
imul $5,%r11,%r11
addq %r11,%r8
adcq $0,%r9
adcq $0,%r10
/* freeze the reduced field element (r10 : r9 : r8) */
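/* constant-time freeze: subtract p = 2^130-5 = (p2 : p1 : p0) and keep
   the difference only if no wrap occurred; shifting the top limb left
   by 62 moves its bit 2 into CF, which is set exactly when the
   subtraction wrapped, so cmovc restores the unsubtracted value */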
movq %r8,%r11
movq %r9,%r12
movq %r10,%r13
subq p0(%rip),%r8
sbbq p1(%rip),%r9
sbbq p2(%rip),%r10
movq %r10,%rcx
shlq $62,%rcx
cmovc %r11,%r8
cmovc %r12,%r9
cmovc %r13,%r10
/* add last 16 bytes of the key */
addq 88(%rsp),%r8
adcq 96(%rsp),%r9
adcq $0,%r10
/* store the 16-byte result */
movq 56(%rsp),%rdi
movq %r8,0(%rdi)
movq %r9,8(%rdi)
movq 0(%rsp),%r11
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rbx
movq 48(%rsp),%rbp
movq %r11,%rsp
ret