// libntruprime-20241008/crypto_hashblocks/sha512/avx2/inner.S
#include "crypto_asm_hidden.h"
// linker define inner
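# note: qhasm-generated x86-64/AVX2 assembly for the SHA-512 block-compression
# routine of crypto_hashblocks/sha512. The rotation and shift amounts used
# below match the FIPS 180-4 SHA-512 functions:
#   Sigma1(e) = (e>>>14) ^ (e>>>18) ^ (e>>>41)        (rorx 14/18/41)
#   Sigma0(a) = (a>>>28) ^ (a>>>34) ^ (a>>>39)        (rorx 28/34/39)
#   sigma0(x) = (x>>>1)  ^ (x>>>8)  ^ (x>>7)          (vpsrlq/vpsllq 1,63,8,56,7)
#   sigma1(x) = (x>>>19) ^ (x>>>61) ^ (x>>6)          (vpsrlq/vpsllq 19,45,61,3,6)
# Ch and Maj are computed with the usual reduced forms,
#   Ch(e,f,g)  = ((f ^ g) & e) ^ g
#   Maj(a,b,c) = ((a ^ b) & c) ^ (a & b).
# Arguments: input_0 = state bytes, input_1 = in, input_2 = inlen,
# input_3 = constants (apparently the 80 round constants, with the vpshufb
# byte-swap mask stored after them at offset 640 = 80*8).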
# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: stack64 r11_stack
# qhasm: stack64 r12_stack
# qhasm: stack64 r13_stack
# qhasm: stack64 r14_stack
# qhasm: stack64 r15_stack
# qhasm: stack64 rbx_stack
# qhasm: stack64 rbp_stack
# qhasm: int64 statebytes
# qhasm: stack64 statebytes_stack
# qhasm: int64 in
# qhasm: stack64 in_stack
# qhasm: int64 inlen
# qhasm: stack64 inlen_stack
# qhasm: int64 constants
# qhasm: stack64 constants_stack
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 r5
# qhasm: int64 r6
# qhasm: int64 r7
# qhasm: int64 i
# qhasm: stack256 state0123
# qhasm: stack256 state4567
# qhasm: reg256 X0
# qhasm: reg256 X4
# qhasm: reg256 X8
# qhasm: reg256 X12
# qhasm: reg256 X1
# qhasm: reg256 X5
# qhasm: reg256 X9
# qhasm: reg256 X13
# qhasm: reg256 bigendian64
# qhasm: reg256 D0
# qhasm: reg256 D4
# qhasm: reg256 D8
# qhasm: reg256 D12
# qhasm: reg256 W0
# qhasm: reg256 W2
# qhasm: reg256 W4
# qhasm: reg256 W6
# qhasm: reg256 W8
# qhasm: reg256 W10
# qhasm: reg256 W12
# qhasm: reg256 W14
# qhasm: stack1280 w
# qhasm: stack256 wc0123
# qhasm: stack256 wc4567
# qhasm: stack256 wc891011
# qhasm: stack256 wc12131415
# qhasm: int64 r0andr1
# qhasm: int64 r2andr3
# qhasm: int64 r4andr5
# qhasm: int64 r6andr7
# qhasm: int64 ch0
# qhasm: int64 ch1
# qhasm: int64 ch2
# qhasm: int64 ch3
# qhasm: int64 ch4
# qhasm: int64 ch5
# qhasm: int64 ch6
# qhasm: int64 ch7
# qhasm: int64 maj0
# qhasm: int64 maj1
# qhasm: int64 maj2
# qhasm: int64 maj3
# qhasm: int64 maj4
# qhasm: int64 maj5
# qhasm: int64 maj6
# qhasm: int64 maj7
# qhasm: int64 r0Sigma0
# qhasm: int64 r1Sigma0
# qhasm: int64 r2Sigma0
# qhasm: int64 r3Sigma0
# qhasm: int64 r4Sigma0
# qhasm: int64 r5Sigma0
# qhasm: int64 r6Sigma0
# qhasm: int64 r7Sigma0
# qhasm: int64 r0Sigma1
# qhasm: int64 r1Sigma1
# qhasm: int64 r2Sigma1
# qhasm: int64 r3Sigma1
# qhasm: int64 r4Sigma1
# qhasm: int64 r5Sigma1
# qhasm: int64 r6Sigma1
# qhasm: int64 r7Sigma1
# qhasm: int64 r018
# qhasm: int64 r118
# qhasm: int64 r218
# qhasm: int64 r318
# qhasm: int64 r418
# qhasm: int64 r518
# qhasm: int64 r618
# qhasm: int64 r718
# qhasm: int64 r041
# qhasm: int64 r141
# qhasm: int64 r241
# qhasm: int64 r341
# qhasm: int64 r441
# qhasm: int64 r541
# qhasm: int64 r641
# qhasm: int64 r741
# qhasm: int64 r034
# qhasm: int64 r134
# qhasm: int64 r234
# qhasm: int64 r334
# qhasm: int64 r434
# qhasm: int64 r534
# qhasm: int64 r634
# qhasm: int64 r734
# qhasm: int64 r039
# qhasm: int64 r139
# qhasm: int64 r239
# qhasm: int64 r339
# qhasm: int64 r439
# qhasm: int64 r539
# qhasm: int64 r639
# qhasm: int64 r739
# qhasm: reg256 X1right1
# qhasm: reg256 X1left63
# qhasm: reg256 X1right8
# qhasm: reg256 X1left56
# qhasm: reg256 X1right7
# qhasm: reg256 X1sigma0
# qhasm: reg256 X5right1
# qhasm: reg256 X5left63
# qhasm: reg256 X5right8
# qhasm: reg256 X5left56
# qhasm: reg256 X5right7
# qhasm: reg256 X5sigma0
# qhasm: reg256 X9right1
# qhasm: reg256 X9left63
# qhasm: reg256 X9right8
# qhasm: reg256 X9left56
# qhasm: reg256 X9right7
# qhasm: reg256 X9sigma0
# qhasm: reg256 X13right1
# qhasm: reg256 X13left63
# qhasm: reg256 X13right8
# qhasm: reg256 X13left56
# qhasm: reg256 X13right7
# qhasm: reg256 X13sigma0
# qhasm: reg256 W0right19
# qhasm: reg256 W0right61
# qhasm: reg256 W0right6
# qhasm: reg256 W0left45
# qhasm: reg256 W0left3
# qhasm: reg256 W0sigma1
# qhasm: reg256 W2right19
# qhasm: reg256 W2right61
# qhasm: reg256 W2right6
# qhasm: reg256 W2left45
# qhasm: reg256 W2left3
# qhasm: reg256 W2sigma1
# qhasm: reg256 W4right19
# qhasm: reg256 W4right61
# qhasm: reg256 W4right6
# qhasm: reg256 W4left45
# qhasm: reg256 W4left3
# qhasm: reg256 W4sigma1
# qhasm: reg256 W6right19
# qhasm: reg256 W6right61
# qhasm: reg256 W6right6
# qhasm: reg256 W6left45
# qhasm: reg256 W6left3
# qhasm: reg256 W6sigma1
# qhasm: reg256 W8right19
# qhasm: reg256 W8right61
# qhasm: reg256 W8right6
# qhasm: reg256 W8left45
# qhasm: reg256 W8left3
# qhasm: reg256 W8sigma1
# qhasm: reg256 W10right19
# qhasm: reg256 W10right61
# qhasm: reg256 W10right6
# qhasm: reg256 W10left45
# qhasm: reg256 W10left3
# qhasm: reg256 W10sigma1
# qhasm: reg256 W12right19
# qhasm: reg256 W12right61
# qhasm: reg256 W12right6
# qhasm: reg256 W12left45
# qhasm: reg256 W12left3
# qhasm: reg256 W12sigma1
# qhasm: reg256 W14right19
# qhasm: reg256 W14right61
# qhasm: reg256 W14right6
# qhasm: reg256 W14left45
# qhasm: reg256 W14left3
# qhasm: reg256 W14sigma1
# qhasm: enter CRYPTO_SHARED_NAMESPACE(inner)
.p2align 7
ASM_HIDDEN _CRYPTO_SHARED_NAMESPACE(inner)
.global _CRYPTO_SHARED_NAMESPACE(inner)
ASM_HIDDEN CRYPTO_SHARED_NAMESPACE(inner)
.global CRYPTO_SHARED_NAMESPACE(inner)
_CRYPTO_SHARED_NAMESPACE(inner):
CRYPTO_SHARED_NAMESPACE(inner):
mov %rsp,%r11
and $511,%r11
add $416,%r11
sub %r11,%rsp
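# note: the prologue rounds %rsp down so that (%rsp mod 512) becomes a fixed
# value and reserves at least 416 bytes of scratch; the 32-byte stack slots
# written with vmovapd below are therefore always 32-byte aligned. %r11 keeps
# the adjustment so the caller's %rsp can be restored when the function returns.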
# qhasm: constants = input_3
# asm 1: mov <input_3=int64#4,>constants=int64#5
# asm 2: mov <input_3=%rcx,>constants=%r8
mov %rcx,%r8
# qhasm: bigendian64 = mem256[input_3+640]
# asm 1: vmovupd 640(<input_3=int64#4),>bigendian64=reg256#1
# asm 2: vmovupd 640(<input_3=%rcx),>bigendian64=%ymm0
vmovupd 640(%rcx),%ymm0
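# note: mem256[constants+640] appears to hold the vpshufb shuffle mask used to
# byte-swap 64-bit lanes between little-endian registers and SHA-512's
# big-endian message/state encoding (640 bytes = 80 round constants * 8).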
# qhasm: X0 = mem256[input_0+0]
# asm 1: vmovupd 0(<input_0=int64#1),>X0=reg256#2
# asm 2: vmovupd 0(<input_0=%rdi),>X0=%ymm1
vmovupd 0(%rdi),%ymm1
# qhasm: statebytes = input_0
# asm 1: mov <input_0=int64#1,>statebytes=int64#4
# asm 2: mov <input_0=%rdi,>statebytes=%rcx
mov %rdi,%rcx
# qhasm: X4 = mem256[input_0+32]
# asm 1: vmovupd 32(<input_0=int64#1),>X4=reg256#3
# asm 2: vmovupd 32(<input_0=%rdi),>X4=%ymm2
vmovupd 32(%rdi),%ymm2
# qhasm: 2x 16x X0 = X0[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X0=reg256#2,>X0=reg256#2
# asm 2: vpshufb <bigendian64=%ymm0,<X0=%ymm1,>X0=%ymm1
vpshufb %ymm0,%ymm1,%ymm1
# qhasm: 2x 16x X4 = X4[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X4=reg256#3,>X4=reg256#3
# asm 2: vpshufb <bigendian64=%ymm0,<X4=%ymm2,>X4=%ymm2
vpshufb %ymm0,%ymm2,%ymm2
# qhasm: state0123 = X0
# asm 1: vmovapd <X0=reg256#2,>state0123=stack256#1
# asm 2: vmovapd <X0=%ymm1,>state0123=0(%rsp)
vmovapd %ymm1,0(%rsp)
# qhasm: r11_stack = caller_r11
# asm 1: movq <caller_r11=int64#9,>r11_stack=stack64#1
# asm 2: movq <caller_r11=%r11,>r11_stack=320(%rsp)
movq %r11,320(%rsp)
# qhasm: state4567 = X4
# asm 1: vmovapd <X4=reg256#3,>state4567=stack256#2
# asm 2: vmovapd <X4=%ymm2,>state4567=32(%rsp)
vmovapd %ymm2,32(%rsp)
# qhasm: r13_stack = caller_r13
# asm 1: movq <caller_r13=int64#11,>r13_stack=stack64#2
# asm 2: movq <caller_r13=%r13,>r13_stack=328(%rsp)
movq %r13,328(%rsp)
# qhasm: r12_stack = caller_r12
# asm 1: movq <caller_r12=int64#10,>r12_stack=stack64#3
# asm 2: movq <caller_r12=%r12,>r12_stack=336(%rsp)
movq %r12,336(%rsp)
# qhasm: r14_stack = caller_r14
# asm 1: movq <caller_r14=int64#12,>r14_stack=stack64#4
# asm 2: movq <caller_r14=%r14,>r14_stack=344(%rsp)
movq %r14,344(%rsp)
# qhasm: rbx_stack = caller_rbx
# asm 1: movq <caller_rbx=int64#14,>rbx_stack=stack64#5
# asm 2: movq <caller_rbx=%rbx,>rbx_stack=352(%rsp)
movq %rbx,352(%rsp)
# qhasm: r15_stack = caller_r15
# asm 1: movq <caller_r15=int64#13,>r15_stack=stack64#6
# asm 2: movq <caller_r15=%r15,>r15_stack=360(%rsp)
movq %r15,360(%rsp)
# qhasm: rbp_stack = caller_rbp
# asm 1: movq <caller_rbp=int64#15,>rbp_stack=stack64#7
# asm 2: movq <caller_rbp=%rbp,>rbp_stack=368(%rsp)
movq %rbp,368(%rsp)
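# note: rbx, rbp, and r12-r15 are callee-saved in the SysV ABI and are spilled
# here; the slot labelled r11_stack holds the stack adjustment computed in the
# prologue rather than a caller value, so it can be added back to %rsp on exit.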
# qhasm: inlen_stack = input_2
# asm 1: movq <input_2=int64#3,>inlen_stack=stack64#8
# asm 2: movq <input_2=%rdx,>inlen_stack=376(%rsp)
movq %rdx,376(%rsp)
# qhasm: in = input_1
# asm 1: mov <input_1=int64#2,>in=int64#1
# asm 2: mov <input_1=%rsi,>in=%rdi
mov %rsi,%rdi
# qhasm: statebytes_stack = statebytes
# asm 1: movq <statebytes=int64#4,>statebytes_stack=stack64#9
# asm 2: movq <statebytes=%rcx,>statebytes_stack=384(%rsp)
movq %rcx,384(%rsp)
# qhasm: r0 = state0123[0]
# asm 1: movq <state0123=stack256#1,>r0=int64#2
# asm 2: movq <state0123=0(%rsp),>r0=%rsi
movq 0(%rsp),%rsi
# qhasm: r2 = state0123[2]
# asm 1: movq <state0123=stack256#1,>r2=int64#3
# asm 2: movq <state0123=16(%rsp),>r2=%rdx
movq 16(%rsp),%rdx
# qhasm: constants_stack = constants
# asm 1: movq <constants=int64#5,>constants_stack=stack64#10
# asm 2: movq <constants=%r8,>constants_stack=392(%rsp)
movq %r8,392(%rsp)
# qhasm: r1 = state0123[1]
# asm 1: movq <state0123=stack256#1,>r1=int64#4
# asm 2: movq <state0123=8(%rsp),>r1=%rcx
movq 8(%rsp),%rcx
# qhasm: r3 = state0123[3]
# asm 1: movq <state0123=stack256#1,>r3=int64#6
# asm 2: movq <state0123=24(%rsp),>r3=%r9
movq 24(%rsp),%r9
# qhasm: r5 = state4567[1]
# asm 1: movq <state4567=stack256#2,>r5=int64#8
# asm 2: movq <state4567=40(%rsp),>r5=%r10
movq 40(%rsp),%r10
# qhasm: r4 = state4567[0]
# asm 1: movq <state4567=stack256#2,>r4=int64#9
# asm 2: movq <state4567=32(%rsp),>r4=%r11
movq 32(%rsp),%r11
# qhasm: r6 = state4567[2]
# asm 1: movq <state4567=stack256#2,>r6=int64#10
# asm 2: movq <state4567=48(%rsp),>r6=%r12
movq 48(%rsp),%r12
# qhasm: r7 = state4567[3]
# asm 1: movq <state4567=stack256#2,>r7=int64#11
# asm 2: movq <state4567=56(%rsp),>r7=%r13
movq 56(%rsp),%r13
# qhasm: new w
# qhasm: nop9
.byte 0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
# qhasm: nop9
.byte 0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
# qhasm: nop2
.byte 0x66,0x90
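# note: explicit nop padding before the loop label, presumably so that
# ._outerloop starts at a favorable instruction-fetch alignment (the function
# entry itself is aligned to 128 bytes by .p2align 7 above).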
# qhasm: outerloop:
._outerloop:
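# note: each pass of ._outerloop compresses one 128-byte message block. The
# block is loaded and byte-swapped with vpshufb, the first 16 round constants
# (constants+0..127) are added up front to form D0/D4/D8/D12, and the scalar
# rounds run interleaved with the vector message-schedule work.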
# qhasm: X0 = mem256[in + 0]
# asm 1: vmovupd 0(<in=int64#1),>X0=reg256#2
# asm 2: vmovupd 0(<in=%rdi),>X0=%ymm1
vmovupd 0(%rdi),%ymm1
# qhasm: 2x 16x X0 = X0[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X0=reg256#2,>X0=reg256#2
# asm 2: vpshufb <bigendian64=%ymm0,<X0=%ymm1,>X0=%ymm1
vpshufb %ymm0,%ymm1,%ymm1
# qhasm: ch7 = r6
# asm 1: mov <r6=int64#10,>ch7=int64#7
# asm 2: mov <r6=%r12,>ch7=%rax
mov %r12,%rax
# qhasm: r4Sigma1 = r4>>>14
# asm 1: rorx $14,<r4=int64#9,>r4Sigma1=int64#12
# asm 2: rorx $14,<r4=%r11,>r4Sigma1=%r14
rorx $14,%r11,%r14
# qhasm: ch7 ^= r5
# asm 1: xor <r5=int64#8,<ch7=int64#7
# asm 2: xor <r5=%r10,<ch7=%rax
xor %r10,%rax
# qhasm: 4x D0 = X0 + mem256[constants + 0]
# asm 1: vpaddq 0(<constants=int64#5),<X0=reg256#2,>D0=reg256#3
# asm 2: vpaddq 0(<constants=%r8),<X0=%ymm1,>D0=%ymm2
vpaddq 0(%r8),%ymm1,%ymm2
# qhasm: r418 = r4>>>18
# asm 1: rorx $18,<r4=int64#9,>r418=int64#13
# asm 2: rorx $18,<r4=%r11,>r418=%r15
rorx $18,%r11,%r15
# qhasm: r4Sigma1 ^= r418
# asm 1: xor <r418=int64#13,<r4Sigma1=int64#12
# asm 2: xor <r418=%r15,<r4Sigma1=%r14
xor %r15,%r14
# qhasm: ch7 &= r4
# asm 1: and <r4=int64#9,<ch7=int64#7
# asm 2: and <r4=%r11,<ch7=%rax
and %r11,%rax
# qhasm: r441 = r4>>>41
# asm 1: rorx $41,<r4=int64#9,>r441=int64#13
# asm 2: rorx $41,<r4=%r11,>r441=%r15
rorx $41,%r11,%r15
# qhasm: r4Sigma1 ^= r441
# asm 1: xor <r441=int64#13,<r4Sigma1=int64#12
# asm 2: xor <r441=%r15,<r4Sigma1=%r14
xor %r15,%r14
# qhasm: r0Sigma0 = r0>>>28
# asm 1: rorx $28,<r0=int64#2,>r0Sigma0=int64#13
# asm 2: rorx $28,<r0=%rsi,>r0Sigma0=%r15
rorx $28,%rsi,%r15
# qhasm: ch7 ^= r6
# asm 1: xor <r6=int64#10,<ch7=int64#7
# asm 2: xor <r6=%r12,<ch7=%rax
xor %r12,%rax
# qhasm: r034 = r0>>>34
# asm 1: rorx $34,<r0=int64#2,>r034=int64#14
# asm 2: rorx $34,<r0=%rsi,>r034=%rbx
rorx $34,%rsi,%rbx
# qhasm: r039 = r0>>>39
# asm 1: rorx $39,<r0=int64#2,>r039=int64#15
# asm 2: rorx $39,<r0=%rsi,>r039=%rbp
rorx $39,%rsi,%rbp
# qhasm: inplace state4567[3] = r7
# asm 1: movq <r7=int64#11,<state4567=stack256#2
# asm 2: movq <r7=%r13,<state4567=56(%rsp)
movq %r13,56(%rsp)
# qhasm: r0Sigma0 ^= r034
# asm 1: xor <r034=int64#14,<r0Sigma0=int64#13
# asm 2: xor <r034=%rbx,<r0Sigma0=%r15
xor %rbx,%r15
# qhasm: r7 += ch7
# asm 1: add <ch7=int64#7,<r7=int64#11
# asm 2: add <ch7=%rax,<r7=%r13
add %rax,%r13
# qhasm: maj6 = r1
# asm 1: mov <r1=int64#4,>maj6=int64#7
# asm 2: mov <r1=%rcx,>maj6=%rax
mov %rcx,%rax
# qhasm: maj6 ^= r0
# asm 1: xor <r0=int64#2,<maj6=int64#7
# asm 2: xor <r0=%rsi,<maj6=%rax
xor %rsi,%rax
# qhasm: r0Sigma0 ^= r039
# asm 1: xor <r039=int64#15,<r0Sigma0=int64#13
# asm 2: xor <r039=%rbp,<r0Sigma0=%r15
xor %rbp,%r15
# qhasm: r0andr1 = r1
# asm 1: mov <r1=int64#4,>r0andr1=int64#14
# asm 2: mov <r1=%rcx,>r0andr1=%rbx
mov %rcx,%rbx
# qhasm: r0andr1 &= r0
# asm 1: and <r0=int64#2,<r0andr1=int64#14
# asm 2: and <r0=%rsi,<r0andr1=%rbx
and %rsi,%rbx
# qhasm: r7 += r4Sigma1
# asm 1: add <r4Sigma1=int64#12,<r7=int64#11
# asm 2: add <r4Sigma1=%r14,<r7=%r13
add %r14,%r13
# qhasm: maj7 = r2
# asm 1: mov <r2=int64#3,>maj7=int64#12
# asm 2: mov <r2=%rdx,>maj7=%r14
mov %rdx,%r14
# qhasm: wc0123 = D0
# asm 1: vmovapd <D0=reg256#3,>wc0123=stack256#3
# asm 2: vmovapd <D0=%ymm2,>wc0123=64(%rsp)
vmovapd %ymm2,64(%rsp)
# qhasm: r7 += wc0123[0]
# asm 1: addq <wc0123=stack256#3,<r7=int64#11
# asm 2: addq <wc0123=64(%rsp),<r7=%r13
addq 64(%rsp),%r13
# qhasm: maj7 &= maj6
# asm 1: and <maj6=int64#7,<maj7=int64#12
# asm 2: and <maj6=%rax,<maj7=%r14
and %rax,%r14
# qhasm: inplace state0123[3] = r3
# asm 1: movq <r3=int64#6,<state0123=stack256#1
# asm 2: movq <r3=%r9,<state0123=24(%rsp)
movq %r9,24(%rsp)
# qhasm: r3 += r7
# asm 1: add <r7=int64#11,<r3=int64#6
# asm 2: add <r7=%r13,<r3=%r9
add %r13,%r9
# qhasm: r7 += r0Sigma0
# asm 1: add <r0Sigma0=int64#13,<r7=int64#11
# asm 2: add <r0Sigma0=%r15,<r7=%r13
add %r15,%r13
# qhasm: maj7 ^= r0andr1
# asm 1: xor <r0andr1=int64#14,<maj7=int64#12
# asm 2: xor <r0andr1=%rbx,<maj7=%r14
xor %rbx,%r14
# qhasm: ch6 = r5
# asm 1: mov <r5=int64#8,>ch6=int64#13
# asm 2: mov <r5=%r10,>ch6=%r15
mov %r10,%r15
# qhasm: r3Sigma1 = r3>>>14
# asm 1: rorx $14,<r3=int64#6,>r3Sigma1=int64#15
# asm 2: rorx $14,<r3=%r9,>r3Sigma1=%rbp
rorx $14,%r9,%rbp
# qhasm: ch6 ^= r4
# asm 1: xor <r4=int64#9,<ch6=int64#13
# asm 2: xor <r4=%r11,<ch6=%r15
xor %r11,%r15
# qhasm: X4 = mem256[in + 32]
# asm 1: vmovupd 32(<in=int64#1),>X4=reg256#3
# asm 2: vmovupd 32(<in=%rdi),>X4=%ymm2
vmovupd 32(%rdi),%ymm2
# qhasm: 2x 16x X4 = X4[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X4=reg256#3,>X4=reg256#3
# asm 2: vpshufb <bigendian64=%ymm0,<X4=%ymm2,>X4=%ymm2
vpshufb %ymm0,%ymm2,%ymm2
# qhasm: r7 += maj7
# asm 1: add <maj7=int64#12,<r7=int64#11
# asm 2: add <maj7=%r14,<r7=%r13
add %r14,%r13
# qhasm: r318 = r3>>>18
# asm 1: rorx $18,<r3=int64#6,>r318=int64#12
# asm 2: rorx $18,<r3=%r9,>r318=%r14
rorx $18,%r9,%r14
# qhasm: 4x D4 = X4 + mem256[constants + 32]
# asm 1: vpaddq 32(<constants=int64#5),<X4=reg256#3,>D4=reg256#4
# asm 2: vpaddq 32(<constants=%r8),<X4=%ymm2,>D4=%ymm3
vpaddq 32(%r8),%ymm2,%ymm3
# qhasm: ch6 &= r3
# asm 1: and <r3=int64#6,<ch6=int64#13
# asm 2: and <r3=%r9,<ch6=%r15
and %r9,%r15
# qhasm: r3Sigma1 ^= r318
# asm 1: xor <r318=int64#12,<r3Sigma1=int64#15
# asm 2: xor <r318=%r14,<r3Sigma1=%rbp
xor %r14,%rbp
# qhasm: maj6 &= r7
# asm 1: and <r7=int64#11,<maj6=int64#7
# asm 2: and <r7=%r13,<maj6=%rax
and %r13,%rax
# qhasm: inplace state4567[2] = r6
# asm 1: movq <r6=int64#10,<state4567=stack256#2
# asm 2: movq <r6=%r12,<state4567=48(%rsp)
movq %r12,48(%rsp)
# qhasm: r341 = r3>>>41
# asm 1: rorx $41,<r3=int64#6,>r341=int64#12
# asm 2: rorx $41,<r3=%r9,>r341=%r14
rorx $41,%r9,%r14
# qhasm: ch6 ^= r5
# asm 1: xor <r5=int64#8,<ch6=int64#13
# asm 2: xor <r5=%r10,<ch6=%r15
xor %r10,%r15
# qhasm: maj6 ^= r0andr1
# asm 1: xor <r0andr1=int64#14,<maj6=int64#7
# asm 2: xor <r0andr1=%rbx,<maj6=%rax
xor %rbx,%rax
# qhasm: r7Sigma0 = r7>>>28
# asm 1: rorx $28,<r7=int64#11,>r7Sigma0=int64#14
# asm 2: rorx $28,<r7=%r13,>r7Sigma0=%rbx
rorx $28,%r13,%rbx
# qhasm: inplace state4567[1] = r5
# asm 1: movq <r5=int64#8,<state4567=stack256#2
# asm 2: movq <r5=%r10,<state4567=40(%rsp)
movq %r10,40(%rsp)
# qhasm: r6 += ch6
# asm 1: add <ch6=int64#13,<r6=int64#10
# asm 2: add <ch6=%r15,<r6=%r12
add %r15,%r12
# qhasm: r3Sigma1 ^= r341
# asm 1: xor <r341=int64#12,<r3Sigma1=int64#15
# asm 2: xor <r341=%r14,<r3Sigma1=%rbp
xor %r14,%rbp
# qhasm: r734 = r7>>>34
# asm 1: rorx $34,<r7=int64#11,>r734=int64#12
# asm 2: rorx $34,<r7=%r13,>r734=%r14
rorx $34,%r13,%r14
# qhasm: inplace state0123[2] = r2
# asm 1: movq <r2=int64#3,<state0123=stack256#1
# asm 2: movq <r2=%rdx,<state0123=16(%rsp)
movq %rdx,16(%rsp)
# qhasm: r5 += wc0123[2]
# asm 1: addq <wc0123=stack256#3,<r5=int64#8
# asm 2: addq <wc0123=80(%rsp),<r5=%r10
addq 80(%rsp),%r10
# qhasm: r739 = r7>>>39
# asm 1: rorx $39,<r7=int64#11,>r739=int64#13
# asm 2: rorx $39,<r7=%r13,>r739=%r15
rorx $39,%r13,%r15
# qhasm: r7Sigma0 ^= r734
# asm 1: xor <r734=int64#12,<r7Sigma0=int64#14
# asm 2: xor <r734=%r14,<r7Sigma0=%rbx
xor %r14,%rbx
# qhasm: ch5 = r4
# asm 1: mov <r4=int64#9,>ch5=int64#12
# asm 2: mov <r4=%r11,>ch5=%r14
mov %r11,%r14
# qhasm: ch5 ^= r3
# asm 1: xor <r3=int64#6,<ch5=int64#12
# asm 2: xor <r3=%r9,<ch5=%r14
xor %r9,%r14
# qhasm: r6 += wc0123[1]
# asm 1: addq <wc0123=stack256#3,<r6=int64#10
# asm 2: addq <wc0123=72(%rsp),<r6=%r12
addq 72(%rsp),%r12
# qhasm: r6 += r3Sigma1
# asm 1: add <r3Sigma1=int64#15,<r6=int64#10
# asm 2: add <r3Sigma1=%rbp,<r6=%r12
add %rbp,%r12
# qhasm: mem256[&w + 0] = X0
# asm 1: vmovupd <X0=reg256#2,<w=stack1280#1
# asm 2: vmovupd <X0=%ymm1,<w=160(%rsp)
vmovupd %ymm1,160(%rsp)
# qhasm: r7Sigma0 ^= r739
# asm 1: xor <r739=int64#13,<r7Sigma0=int64#14
# asm 2: xor <r739=%r15,<r7Sigma0=%rbx
xor %r15,%rbx
# qhasm: r2 += r6
# asm 1: add <r6=int64#10,<r2=int64#3
# asm 2: add <r6=%r12,<r2=%rdx
add %r12,%rdx
# qhasm: r6 += maj6
# asm 1: add <maj6=int64#7,<r6=int64#10
# asm 2: add <maj6=%rax,<r6=%r12
add %rax,%r12
# qhasm: inplace state4567[0] = r4
# asm 1: movq <r4=int64#9,<state4567=stack256#2
# asm 2: movq <r4=%r11,<state4567=32(%rsp)
movq %r11,32(%rsp)
# qhasm: r2Sigma1 = r2>>>14
# asm 1: rorx $14,<r2=int64#3,>r2Sigma1=int64#7
# asm 2: rorx $14,<r2=%rdx,>r2Sigma1=%rax
rorx $14,%rdx,%rax
# qhasm: r6 += r7Sigma0
# asm 1: add <r7Sigma0=int64#14,<r6=int64#10
# asm 2: add <r7Sigma0=%rbx,<r6=%r12
add %rbx,%r12
# qhasm: ch5 &= r2
# asm 1: and <r2=int64#3,<ch5=int64#12
# asm 2: and <r2=%rdx,<ch5=%r14
and %rdx,%r14
# qhasm: r218 = r2>>>18
# asm 1: rorx $18,<r2=int64#3,>r218=int64#13
# asm 2: rorx $18,<r2=%rdx,>r218=%r15
rorx $18,%rdx,%r15
# qhasm: mem256[&w + 32] = X4
# asm 1: vmovupd <X4=reg256#3,<w=stack1280#1
# asm 2: vmovupd <X4=%ymm2,<w=192(%rsp)
vmovupd %ymm2,192(%rsp)
# qhasm: r2Sigma1 ^= r218
# asm 1: xor <r218=int64#13,<r2Sigma1=int64#7
# asm 2: xor <r218=%r15,<r2Sigma1=%rax
xor %r15,%rax
# qhasm: ch5 ^= r4
# asm 1: xor <r4=int64#9,<ch5=int64#12
# asm 2: xor <r4=%r11,<ch5=%r14
xor %r11,%r14
# qhasm: r241 = r2>>>41
# asm 1: rorx $41,<r2=int64#3,>r241=int64#13
# asm 2: rorx $41,<r2=%rdx,>r241=%r15
rorx $41,%rdx,%r15
# qhasm: maj4 = r7
# asm 1: mov <r7=int64#11,>maj4=int64#14
# asm 2: mov <r7=%r13,>maj4=%rbx
mov %r13,%rbx
# qhasm: maj4 ^= r6
# asm 1: xor <r6=int64#10,<maj4=int64#14
# asm 2: xor <r6=%r12,<maj4=%rbx
xor %r12,%rbx
# qhasm: r6Sigma0 = r6>>>28
# asm 1: rorx $28,<r6=int64#10,>r6Sigma0=int64#15
# asm 2: rorx $28,<r6=%r12,>r6Sigma0=%rbp
rorx $28,%r12,%rbp
# qhasm: wc4567 = D4
# asm 1: vmovapd <D4=reg256#4,>wc4567=stack256#4
# asm 2: vmovapd <D4=%ymm3,>wc4567=96(%rsp)
vmovapd %ymm3,96(%rsp)
# qhasm: r2Sigma1 ^= r241
# asm 1: xor <r241=int64#13,<r2Sigma1=int64#7
# asm 2: xor <r241=%r15,<r2Sigma1=%rax
xor %r15,%rax
# qhasm: r5 += ch5
# asm 1: add <ch5=int64#12,<r5=int64#8
# asm 2: add <ch5=%r14,<r5=%r10
add %r14,%r10
# qhasm: r634 = r6>>>34
# asm 1: rorx $34,<r6=int64#10,>r634=int64#12
# asm 2: rorx $34,<r6=%r12,>r634=%r14
rorx $34,%r12,%r14
# qhasm: in_stack = in
# asm 1: movq <in=int64#1,>in_stack=stack64#11
# asm 2: movq <in=%rdi,>in_stack=400(%rsp)
movq %rdi,400(%rsp)
# qhasm: r6Sigma0 ^= r634
# asm 1: xor <r634=int64#12,<r6Sigma0=int64#15
# asm 2: xor <r634=%r14,<r6Sigma0=%rbp
xor %r14,%rbp
# qhasm: r639 = r6>>>39
# asm 1: rorx $39,<r6=int64#10,>r639=int64#12
# asm 2: rorx $39,<r6=%r12,>r639=%r14
rorx $39,%r12,%r14
# qhasm: r6andr7 = r7
# asm 1: mov <r7=int64#11,>r6andr7=int64#13
# asm 2: mov <r7=%r13,>r6andr7=%r15
mov %r13,%r15
# qhasm: r6andr7 &= r6
# asm 1: and <r6=int64#10,<r6andr7=int64#13
# asm 2: and <r6=%r12,<r6andr7=%r15
and %r12,%r15
# qhasm: r6Sigma0 ^= r639
# asm 1: xor <r639=int64#12,<r6Sigma0=int64#15
# asm 2: xor <r639=%r14,<r6Sigma0=%rbp
xor %r14,%rbp
# qhasm: maj5 = r0
# asm 1: mov <r0=int64#2,>maj5=int64#12
# asm 2: mov <r0=%rsi,>maj5=%r14
mov %rsi,%r14
# qhasm: inplace state0123[1] = r1
# asm 1: movq <r1=int64#4,<state0123=stack256#1
# asm 2: movq <r1=%rcx,<state0123=8(%rsp)
movq %rcx,8(%rsp)
# qhasm: r5 += r2Sigma1
# asm 1: add <r2Sigma1=int64#7,<r5=int64#8
# asm 2: add <r2Sigma1=%rax,<r5=%r10
add %rax,%r10
# qhasm: r4 += wc0123[3]
# asm 1: addq <wc0123=stack256#3,<r4=int64#9
# asm 2: addq <wc0123=88(%rsp),<r4=%r11
addq 88(%rsp),%r11
# qhasm: maj5 &= maj4
# asm 1: and <maj4=int64#14,<maj5=int64#12
# asm 2: and <maj4=%rbx,<maj5=%r14
and %rbx,%r14
# qhasm: r1 += r5
# asm 1: add <r5=int64#8,<r1=int64#4
# asm 2: add <r5=%r10,<r1=%rcx
add %r10,%rcx
# qhasm: r5 += r6Sigma0
# asm 1: add <r6Sigma0=int64#15,<r5=int64#8
# asm 2: add <r6Sigma0=%rbp,<r5=%r10
add %rbp,%r10
# qhasm: maj5 ^= r6andr7
# asm 1: xor <r6andr7=int64#13,<maj5=int64#12
# asm 2: xor <r6andr7=%r15,<maj5=%r14
xor %r15,%r14
# qhasm: ch4 = r3
# asm 1: mov <r3=int64#6,>ch4=int64#7
# asm 2: mov <r3=%r9,>ch4=%rax
mov %r9,%rax
# qhasm: r1Sigma1 = r1>>>14
# asm 1: rorx $14,<r1=int64#4,>r1Sigma1=int64#15
# asm 2: rorx $14,<r1=%rcx,>r1Sigma1=%rbp
rorx $14,%rcx,%rbp
# qhasm: ch4 ^= r2
# asm 1: xor <r2=int64#3,<ch4=int64#7
# asm 2: xor <r2=%rdx,<ch4=%rax
xor %rdx,%rax
# qhasm: r5 += maj5
# asm 1: add <maj5=int64#12,<r5=int64#8
# asm 2: add <maj5=%r14,<r5=%r10
add %r14,%r10
# qhasm: ch4 &= r1
# asm 1: and <r1=int64#4,<ch4=int64#7
# asm 2: and <r1=%rcx,<ch4=%rax
and %rcx,%rax
# qhasm: r118 = r1>>>18
# asm 1: rorx $18,<r1=int64#4,>r118=int64#12
# asm 2: rorx $18,<r1=%rcx,>r118=%r14
rorx $18,%rcx,%r14
# qhasm: inplace state0123[0] = r0
# asm 1: movq <r0=int64#2,<state0123=stack256#1
# asm 2: movq <r0=%rsi,<state0123=0(%rsp)
movq %rsi,0(%rsp)
# qhasm: r1Sigma1 ^= r118
# asm 1: xor <r118=int64#12,<r1Sigma1=int64#15
# asm 2: xor <r118=%r14,<r1Sigma1=%rbp
xor %r14,%rbp
# qhasm: maj4 &= r5
# asm 1: and <r5=int64#8,<maj4=int64#14
# asm 2: and <r5=%r10,<maj4=%rbx
and %r10,%rbx
# qhasm: ch4 ^= r3
# asm 1: xor <r3=int64#6,<ch4=int64#7
# asm 2: xor <r3=%r9,<ch4=%rax
xor %r9,%rax
# qhasm: r141 = r1>>>41
# asm 1: rorx $41,<r1=int64#4,>r141=int64#12
# asm 2: rorx $41,<r1=%rcx,>r141=%r14
rorx $41,%rcx,%r14
# qhasm: X8 = mem256[in + 64]
# asm 1: vmovupd 64(<in=int64#1),>X8=reg256#4
# asm 2: vmovupd 64(<in=%rdi),>X8=%ymm3
vmovupd 64(%rdi),%ymm3
# qhasm: r1Sigma1 ^= r141
# asm 1: xor <r141=int64#12,<r1Sigma1=int64#15
# asm 2: xor <r141=%r14,<r1Sigma1=%rbp
xor %r14,%rbp
# qhasm: r5Sigma0 = r5>>>28
# asm 1: rorx $28,<r5=int64#8,>r5Sigma0=int64#12
# asm 2: rorx $28,<r5=%r10,>r5Sigma0=%r14
rorx $28,%r10,%r14
# qhasm: maj4 ^= r6andr7
# asm 1: xor <r6andr7=int64#13,<maj4=int64#14
# asm 2: xor <r6andr7=%r15,<maj4=%rbx
xor %r15,%rbx
# qhasm: r4 += ch4
# asm 1: add <ch4=int64#7,<r4=int64#9
# asm 2: add <ch4=%rax,<r4=%r11
add %rax,%r11
# qhasm: r534 = r5>>>34
# asm 1: rorx $34,<r5=int64#8,>r534=int64#7
# asm 2: rorx $34,<r5=%r10,>r534=%rax
rorx $34,%r10,%rax
# qhasm: r4 += r1Sigma1
# asm 1: add <r1Sigma1=int64#15,<r4=int64#9
# asm 2: add <r1Sigma1=%rbp,<r4=%r11
add %rbp,%r11
# qhasm: r5Sigma0 ^= r534
# asm 1: xor <r534=int64#7,<r5Sigma0=int64#12
# asm 2: xor <r534=%rax,<r5Sigma0=%r14
xor %rax,%r14
# qhasm: r3 += wc4567[0]
# asm 1: addq <wc4567=stack256#4,<r3=int64#6
# asm 2: addq <wc4567=96(%rsp),<r3=%r9
addq 96(%rsp),%r9
# qhasm: X12 = mem256[in + 96]
# asm 1: vmovupd 96(<in=int64#1),>X12=reg256#5
# asm 2: vmovupd 96(<in=%rdi),>X12=%ymm4
vmovupd 96(%rdi),%ymm4
# qhasm: r0 += r4
# asm 1: add <r4=int64#9,<r0=int64#2
# asm 2: add <r4=%r11,<r0=%rsi
add %r11,%rsi
# qhasm: r539 = r5>>>39
# asm 1: rorx $39,<r5=int64#8,>r539=int64#1
# asm 2: rorx $39,<r5=%r10,>r539=%rdi
rorx $39,%r10,%rdi
# qhasm: r4 += maj4
# asm 1: add <maj4=int64#14,<r4=int64#9
# asm 2: add <maj4=%rbx,<r4=%r11
add %rbx,%r11
# qhasm: r5Sigma0 ^= r539
# asm 1: xor <r539=int64#1,<r5Sigma0=int64#12
# asm 2: xor <r539=%rdi,<r5Sigma0=%r14
xor %rdi,%r14
# qhasm: r0Sigma1 = r0>>>14
# asm 1: rorx $14,<r0=int64#2,>r0Sigma1=int64#1
# asm 2: rorx $14,<r0=%rsi,>r0Sigma1=%rdi
rorx $14,%rsi,%rdi
# qhasm: r4 += r5Sigma0
# asm 1: add <r5Sigma0=int64#12,<r4=int64#9
# asm 2: add <r5Sigma0=%r14,<r4=%r11
add %r14,%r11
# qhasm: ch3 = r2
# asm 1: mov <r2=int64#3,>ch3=int64#7
# asm 2: mov <r2=%rdx,>ch3=%rax
mov %rdx,%rax
# qhasm: r018 = r0>>>18
# asm 1: rorx $18,<r0=int64#2,>r018=int64#12
# asm 2: rorx $18,<r0=%rsi,>r018=%r14
rorx $18,%rsi,%r14
# qhasm: ch3 ^= r1
# asm 1: xor <r1=int64#4,<ch3=int64#7
# asm 2: xor <r1=%rcx,<ch3=%rax
xor %rcx,%rax
# qhasm: 2x 16x X8 = X8[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X8=reg256#4,>X8=reg256#4
# asm 2: vpshufb <bigendian64=%ymm0,<X8=%ymm3,>X8=%ymm3
vpshufb %ymm0,%ymm3,%ymm3
# qhasm: r0Sigma1 ^= r018
# asm 1: xor <r018=int64#12,<r0Sigma1=int64#1
# asm 2: xor <r018=%r14,<r0Sigma1=%rdi
xor %r14,%rdi
# qhasm: ch3 &= r0
# asm 1: and <r0=int64#2,<ch3=int64#7
# asm 2: and <r0=%rsi,<ch3=%rax
and %rsi,%rax
# qhasm: r041 = r0>>>41
# asm 1: rorx $41,<r0=int64#2,>r041=int64#12
# asm 2: rorx $41,<r0=%rsi,>r041=%r14
rorx $41,%rsi,%r14
# qhasm: r4Sigma0 = r4>>>28
# asm 1: rorx $28,<r4=int64#9,>r4Sigma0=int64#13
# asm 2: rorx $28,<r4=%r11,>r4Sigma0=%r15
rorx $28,%r11,%r15
# qhasm: 4x D8 = X8 + mem256[constants + 64]
# asm 1: vpaddq 64(<constants=int64#5),<X8=reg256#4,>D8=reg256#6
# asm 2: vpaddq 64(<constants=%r8),<X8=%ymm3,>D8=%ymm5
vpaddq 64(%r8),%ymm3,%ymm5
# qhasm: r0Sigma1 ^= r041
# asm 1: xor <r041=int64#12,<r0Sigma1=int64#1
# asm 2: xor <r041=%r14,<r0Sigma1=%rdi
xor %r14,%rdi
# qhasm: ch3 ^= r2
# asm 1: xor <r2=int64#3,<ch3=int64#7
# asm 2: xor <r2=%rdx,<ch3=%rax
xor %rdx,%rax
# qhasm: mem256[&w + 64] = X8
# asm 1: vmovupd <X8=reg256#4,<w=stack1280#1
# asm 2: vmovupd <X8=%ymm3,<w=224(%rsp)
vmovupd %ymm3,224(%rsp)
# qhasm: r3 += ch3
# asm 1: add <ch3=int64#7,<r3=int64#6
# asm 2: add <ch3=%rax,<r3=%r9
add %rax,%r9
# qhasm: r434 = r4>>>34
# asm 1: rorx $34,<r4=int64#9,>r434=int64#7
# asm 2: rorx $34,<r4=%r11,>r434=%rax
rorx $34,%r11,%rax
# qhasm: r439 = r4>>>39
# asm 1: rorx $39,<r4=int64#9,>r439=int64#12
# asm 2: rorx $39,<r4=%r11,>r439=%r14
rorx $39,%r11,%r14
# qhasm: maj2 = r5
# asm 1: mov <r5=int64#8,>maj2=int64#14
# asm 2: mov <r5=%r10,>maj2=%rbx
mov %r10,%rbx
# qhasm: maj2 ^= r4
# asm 1: xor <r4=int64#9,<maj2=int64#14
# asm 2: xor <r4=%r11,<maj2=%rbx
xor %r11,%rbx
# qhasm: wc891011 = D8
# asm 1: vmovapd <D8=reg256#6,>wc891011=stack256#3
# asm 2: vmovapd <D8=%ymm5,>wc891011=64(%rsp)
vmovapd %ymm5,64(%rsp)
# qhasm: r4Sigma0 ^= r434
# asm 1: xor <r434=int64#7,<r4Sigma0=int64#13
# asm 2: xor <r434=%rax,<r4Sigma0=%r15
xor %rax,%r15
# qhasm: r3 += r0Sigma1
# asm 1: add <r0Sigma1=int64#1,<r3=int64#6
# asm 2: add <r0Sigma1=%rdi,<r3=%r9
add %rdi,%r9
# qhasm: r4andr5 = r5
# asm 1: mov <r5=int64#8,>r4andr5=int64#1
# asm 2: mov <r5=%r10,>r4andr5=%rdi
mov %r10,%rdi
# qhasm: r4andr5 &= r4
# asm 1: and <r4=int64#9,<r4andr5=int64#1
# asm 2: and <r4=%r11,<r4andr5=%rdi
and %r11,%rdi
# qhasm: r2 += wc4567[1]
# asm 1: addq <wc4567=stack256#4,<r2=int64#3
# asm 2: addq <wc4567=104(%rsp),<r2=%rdx
addq 104(%rsp),%rdx
# qhasm: r4Sigma0 ^= r439
# asm 1: xor <r439=int64#12,<r4Sigma0=int64#13
# asm 2: xor <r439=%r14,<r4Sigma0=%r15
xor %r14,%r15
# qhasm: maj3 = r6
# asm 1: mov <r6=int64#10,>maj3=int64#7
# asm 2: mov <r6=%r12,>maj3=%rax
mov %r12,%rax
# qhasm: maj3 &= maj2
# asm 1: and <maj2=int64#14,<maj3=int64#7
# asm 2: and <maj2=%rbx,<maj3=%rax
and %rbx,%rax
# qhasm: r7 += r3
# asm 1: add <r3=int64#6,<r7=int64#11
# asm 2: add <r3=%r9,<r7=%r13
add %r9,%r13
# qhasm: r3 += r4Sigma0
# asm 1: add <r4Sigma0=int64#13,<r3=int64#6
# asm 2: add <r4Sigma0=%r15,<r3=%r9
add %r15,%r9
# qhasm: 2x 16x X12 = X12[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X12=reg256#5,>X12=reg256#5
# asm 2: vpshufb <bigendian64=%ymm0,<X12=%ymm4,>X12=%ymm4
vpshufb %ymm0,%ymm4,%ymm4
# qhasm: ch2 = r1
# asm 1: mov <r1=int64#4,>ch2=int64#12
# asm 2: mov <r1=%rcx,>ch2=%r14
mov %rcx,%r14
# qhasm: maj3 ^= r4andr5
# asm 1: xor <r4andr5=int64#1,<maj3=int64#7
# asm 2: xor <r4andr5=%rdi,<maj3=%rax
xor %rdi,%rax
# qhasm: ch2 ^= r0
# asm 1: xor <r0=int64#2,<ch2=int64#12
# asm 2: xor <r0=%rsi,<ch2=%r14
xor %rsi,%r14
# qhasm: r3 += maj3
# asm 1: add <maj3=int64#7,<r3=int64#6
# asm 2: add <maj3=%rax,<r3=%r9
add %rax,%r9
# qhasm: r7Sigma1 = r7>>>14
# asm 1: rorx $14,<r7=int64#11,>r7Sigma1=int64#7
# asm 2: rorx $14,<r7=%r13,>r7Sigma1=%rax
rorx $14,%r13,%rax
# qhasm: 4x D12 = X12 + mem256[constants + 96]
# asm 1: vpaddq 96(<constants=int64#5),<X12=reg256#5,>D12=reg256#6
# asm 2: vpaddq 96(<constants=%r8),<X12=%ymm4,>D12=%ymm5
vpaddq 96(%r8),%ymm4,%ymm5
# qhasm: ch2 &= r7
# asm 1: and <r7=int64#11,<ch2=int64#12
# asm 2: and <r7=%r13,<ch2=%r14
and %r13,%r14
# qhasm: r718 = r7>>>18
# asm 1: rorx $18,<r7=int64#11,>r718=int64#13
# asm 2: rorx $18,<r7=%r13,>r718=%r15
rorx $18,%r13,%r15
# qhasm: r7Sigma1 ^= r718
# asm 1: xor <r718=int64#13,<r7Sigma1=int64#7
# asm 2: xor <r718=%r15,<r7Sigma1=%rax
xor %r15,%rax
# qhasm: maj2 &= r3
# asm 1: and <r3=int64#6,<maj2=int64#14
# asm 2: and <r3=%r9,<maj2=%rbx
and %r9,%rbx
# qhasm: ch2 ^= r1
# asm 1: xor <r1=int64#4,<ch2=int64#12
# asm 2: xor <r1=%rcx,<ch2=%r14
xor %rcx,%r14
# qhasm: r741 = r7>>>41
# asm 1: rorx $41,<r7=int64#11,>r741=int64#13
# asm 2: rorx $41,<r7=%r13,>r741=%r15
rorx $41,%r13,%r15
# qhasm: mem256[&w + 96] = X12
# asm 1: vmovupd <X12=reg256#5,<w=stack1280#1
# asm 2: vmovupd <X12=%ymm4,<w=256(%rsp)
vmovupd %ymm4,256(%rsp)
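# note: the 160-byte w buffer on the stack now holds the 16 byte-swapped
# message words w[0..15]; the schedule expansion in ._innerloop reads
# overlapping 4-word windows from it (e.g. &w+8, &w+40, &w+112).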
# qhasm: r7Sigma1 ^= r741
# asm 1: xor <r741=int64#13,<r7Sigma1=int64#7
# asm 2: xor <r741=%r15,<r7Sigma1=%rax
xor %r15,%rax
# qhasm: maj2 ^= r4andr5
# asm 1: xor <r4andr5=int64#1,<maj2=int64#14
# asm 2: xor <r4andr5=%rdi,<maj2=%rbx
xor %rdi,%rbx
# qhasm: r3Sigma0 = r3>>>28
# asm 1: rorx $28,<r3=int64#6,>r3Sigma0=int64#1
# asm 2: rorx $28,<r3=%r9,>r3Sigma0=%rdi
rorx $28,%r9,%rdi
# qhasm: wc12131415 = D12
# asm 1: vmovapd <D12=reg256#6,>wc12131415=stack256#5
# asm 2: vmovapd <D12=%ymm5,>wc12131415=128(%rsp)
vmovapd %ymm5,128(%rsp)
# qhasm: r2 += ch2
# asm 1: add <ch2=int64#12,<r2=int64#3
# asm 2: add <ch2=%r14,<r2=%rdx
add %r14,%rdx
# qhasm: r2 += r7Sigma1
# asm 1: add <r7Sigma1=int64#7,<r2=int64#3
# asm 2: add <r7Sigma1=%rax,<r2=%rdx
add %rax,%rdx
# qhasm: r1 += wc4567[2]
# asm 1: addq <wc4567=stack256#4,<r1=int64#4
# asm 2: addq <wc4567=112(%rsp),<r1=%rcx
addq 112(%rsp),%rcx
# qhasm: ch1 = r0
# asm 1: mov <r0=int64#2,>ch1=int64#7
# asm 2: mov <r0=%rsi,>ch1=%rax
mov %rsi,%rax
# qhasm: r334 = r3>>>34
# asm 1: rorx $34,<r3=int64#6,>r334=int64#12
# asm 2: rorx $34,<r3=%r9,>r334=%r14
rorx $34,%r9,%r14
# qhasm: ch1 ^= r7
# asm 1: xor <r7=int64#11,<ch1=int64#7
# asm 2: xor <r7=%r13,<ch1=%rax
xor %r13,%rax
# qhasm: r3Sigma0 ^= r334
# asm 1: xor <r334=int64#12,<r3Sigma0=int64#1
# asm 2: xor <r334=%r14,<r3Sigma0=%rdi
xor %r14,%rdi
# qhasm: r6 += r2
# asm 1: add <r2=int64#3,<r6=int64#10
# asm 2: add <r2=%rdx,<r6=%r12
add %rdx,%r12
# qhasm: r339 = r3>>>39
# asm 1: rorx $39,<r3=int64#6,>r339=int64#12
# asm 2: rorx $39,<r3=%r9,>r339=%r14
rorx $39,%r9,%r14
# qhasm: r2 += maj2
# asm 1: add <maj2=int64#14,<r2=int64#3
# asm 2: add <maj2=%rbx,<r2=%rdx
add %rbx,%rdx
# qhasm: r3Sigma0 ^= r339
# asm 1: xor <r339=int64#12,<r3Sigma0=int64#1
# asm 2: xor <r339=%r14,<r3Sigma0=%rdi
xor %r14,%rdi
# qhasm: r6Sigma1 = r6>>>14
# asm 1: rorx $14,<r6=int64#10,>r6Sigma1=int64#12
# asm 2: rorx $14,<r6=%r12,>r6Sigma1=%r14
rorx $14,%r12,%r14
# qhasm: r618 = r6>>>18
# asm 1: rorx $18,<r6=int64#10,>r618=int64#13
# asm 2: rorx $18,<r6=%r12,>r618=%r15
rorx $18,%r12,%r15
# qhasm: r641 = r6>>>41
# asm 1: rorx $41,<r6=int64#10,>r641=int64#14
# asm 2: rorx $41,<r6=%r12,>r641=%rbx
rorx $41,%r12,%rbx
# qhasm: ch1 &= r6
# asm 1: and <r6=int64#10,<ch1=int64#7
# asm 2: and <r6=%r12,<ch1=%rax
and %r12,%rax
# qhasm: r2 += r3Sigma0
# asm 1: add <r3Sigma0=int64#1,<r2=int64#3
# asm 2: add <r3Sigma0=%rdi,<r2=%rdx
add %rdi,%rdx
# qhasm: r6Sigma1 ^= r618
# asm 1: xor <r618=int64#13,<r6Sigma1=int64#12
# asm 2: xor <r618=%r15,<r6Sigma1=%r14
xor %r15,%r14
# qhasm: r2Sigma0 = r2>>>28
# asm 1: rorx $28,<r2=int64#3,>r2Sigma0=int64#1
# asm 2: rorx $28,<r2=%rdx,>r2Sigma0=%rdi
rorx $28,%rdx,%rdi
# qhasm: r6Sigma1 ^= r641
# asm 1: xor <r641=int64#14,<r6Sigma1=int64#12
# asm 2: xor <r641=%rbx,<r6Sigma1=%r14
xor %rbx,%r14
# qhasm: ch1 ^= r0
# asm 1: xor <r0=int64#2,<ch1=int64#7
# asm 2: xor <r0=%rsi,<ch1=%rax
xor %rsi,%rax
# qhasm: r234 = r2>>>34
# asm 1: rorx $34,<r2=int64#3,>r234=int64#13
# asm 2: rorx $34,<r2=%rdx,>r234=%r15
rorx $34,%rdx,%r15
# qhasm: maj0 = r3
# asm 1: mov <r3=int64#6,>maj0=int64#14
# asm 2: mov <r3=%r9,>maj0=%rbx
mov %r9,%rbx
# qhasm: maj0 ^= r2
# asm 1: xor <r2=int64#3,<maj0=int64#14
# asm 2: xor <r2=%rdx,<maj0=%rbx
xor %rdx,%rbx
# qhasm: r2Sigma0 ^= r234
# asm 1: xor <r234=int64#13,<r2Sigma0=int64#1
# asm 2: xor <r234=%r15,<r2Sigma0=%rdi
xor %r15,%rdi
# qhasm: r1 += ch1
# asm 1: add <ch1=int64#7,<r1=int64#4
# asm 2: add <ch1=%rax,<r1=%rcx
add %rax,%rcx
# qhasm: r239 = r2>>>39
# asm 1: rorx $39,<r2=int64#3,>r239=int64#7
# asm 2: rorx $39,<r2=%rdx,>r239=%rax
rorx $39,%rdx,%rax
# qhasm: r2andr3 = r3
# asm 1: mov <r3=int64#6,>r2andr3=int64#13
# asm 2: mov <r3=%r9,>r2andr3=%r15
mov %r9,%r15
# qhasm: r2andr3 &= r2
# asm 1: and <r2=int64#3,<r2andr3=int64#13
# asm 2: and <r2=%rdx,<r2andr3=%r15
and %rdx,%r15
# qhasm: r2Sigma0 ^= r239
# asm 1: xor <r239=int64#7,<r2Sigma0=int64#1
# asm 2: xor <r239=%rax,<r2Sigma0=%rdi
xor %rax,%rdi
# qhasm: r1 += r6Sigma1
# asm 1: add <r6Sigma1=int64#12,<r1=int64#4
# asm 2: add <r6Sigma1=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: maj1 = r4
# asm 1: mov <r4=int64#9,>maj1=int64#7
# asm 2: mov <r4=%r11,>maj1=%rax
mov %r11,%rax
# qhasm: maj1 &= maj0
# asm 1: and <maj0=int64#14,<maj1=int64#7
# asm 2: and <maj0=%rbx,<maj1=%rax
and %rbx,%rax
# qhasm: r0 += wc4567[3]
# asm 1: addq <wc4567=stack256#4,<r0=int64#2
# asm 2: addq <wc4567=120(%rsp),<r0=%rsi
addq 120(%rsp),%rsi
# qhasm: r5 += r1
# asm 1: add <r1=int64#4,<r5=int64#8
# asm 2: add <r1=%rcx,<r5=%r10
add %rcx,%r10
# qhasm: r1 += r2Sigma0
# asm 1: add <r2Sigma0=int64#1,<r1=int64#4
# asm 2: add <r2Sigma0=%rdi,<r1=%rcx
add %rdi,%rcx
# qhasm: ch0 = r7
# asm 1: mov <r7=int64#11,>ch0=int64#1
# asm 2: mov <r7=%r13,>ch0=%rdi
mov %r13,%rdi
# qhasm: maj1 ^= r2andr3
# asm 1: xor <r2andr3=int64#13,<maj1=int64#7
# asm 2: xor <r2andr3=%r15,<maj1=%rax
xor %r15,%rax
# qhasm: ch0 ^= r6
# asm 1: xor <r6=int64#10,<ch0=int64#1
# asm 2: xor <r6=%r12,<ch0=%rdi
xor %r12,%rdi
# qhasm: r5Sigma1 = r5>>>14
# asm 1: rorx $14,<r5=int64#8,>r5Sigma1=int64#12
# asm 2: rorx $14,<r5=%r10,>r5Sigma1=%r14
rorx $14,%r10,%r14
# qhasm: r1 += maj1
# asm 1: add <maj1=int64#7,<r1=int64#4
# asm 2: add <maj1=%rax,<r1=%rcx
add %rax,%rcx
# qhasm: ch0 &= r5
# asm 1: and <r5=int64#8,<ch0=int64#1
# asm 2: and <r5=%r10,<ch0=%rdi
and %r10,%rdi
# qhasm: r518 = r5>>>18
# asm 1: rorx $18,<r5=int64#8,>r518=int64#7
# asm 2: rorx $18,<r5=%r10,>r518=%rax
rorx $18,%r10,%rax
# qhasm: r5Sigma1 ^= r518
# asm 1: xor <r518=int64#7,<r5Sigma1=int64#12
# asm 2: xor <r518=%rax,<r5Sigma1=%r14
xor %rax,%r14
# qhasm: maj0 &= r1
# asm 1: and <r1=int64#4,<maj0=int64#14
# asm 2: and <r1=%rcx,<maj0=%rbx
and %rcx,%rbx
# qhasm: ch0 ^= r7
# asm 1: xor <r7=int64#11,<ch0=int64#1
# asm 2: xor <r7=%r13,<ch0=%rdi
xor %r13,%rdi
# qhasm: r541 = r5>>>41
# asm 1: rorx $41,<r5=int64#8,>r541=int64#7
# asm 2: rorx $41,<r5=%r10,>r541=%rax
rorx $41,%r10,%rax
# qhasm: r5Sigma1 ^= r541
# asm 1: xor <r541=int64#7,<r5Sigma1=int64#12
# asm 2: xor <r541=%rax,<r5Sigma1=%r14
xor %rax,%r14
# qhasm: maj0 ^= r2andr3
# asm 1: xor <r2andr3=int64#13,<maj0=int64#14
# asm 2: xor <r2andr3=%r15,<maj0=%rbx
xor %r15,%rbx
# qhasm: r1Sigma0 = r1>>>28
# asm 1: rorx $28,<r1=int64#4,>r1Sigma0=int64#7
# asm 2: rorx $28,<r1=%rcx,>r1Sigma0=%rax
rorx $28,%rcx,%rax
# qhasm: r0 += ch0
# asm 1: add <ch0=int64#1,<r0=int64#2
# asm 2: add <ch0=%rdi,<r0=%rsi
add %rdi,%rsi
# qhasm: r0 += r5Sigma1
# asm 1: add <r5Sigma1=int64#12,<r0=int64#2
# asm 2: add <r5Sigma1=%r14,<r0=%rsi
add %r14,%rsi
# qhasm: r134 = r1>>>34
# asm 1: rorx $34,<r1=int64#4,>r134=int64#1
# asm 2: rorx $34,<r1=%rcx,>r134=%rdi
rorx $34,%rcx,%rdi
# qhasm: r1Sigma0 ^= r134
# asm 1: xor <r134=int64#1,<r1Sigma0=int64#7
# asm 2: xor <r134=%rdi,<r1Sigma0=%rax
xor %rdi,%rax
# qhasm: r4 += r0
# asm 1: add <r0=int64#2,<r4=int64#9
# asm 2: add <r0=%rsi,<r4=%r11
add %rsi,%r11
# qhasm: r0 += maj0
# asm 1: add <maj0=int64#14,<r0=int64#2
# asm 2: add <maj0=%rbx,<r0=%rsi
add %rbx,%rsi
# qhasm: r139 = r1>>>39
# asm 1: rorx $39,<r1=int64#4,>r139=int64#1
# asm 2: rorx $39,<r1=%rcx,>r139=%rdi
rorx $39,%rcx,%rdi
# qhasm: r1Sigma0 ^= r139
# asm 1: xor <r139=int64#1,<r1Sigma0=int64#7
# asm 2: xor <r139=%rdi,<r1Sigma0=%rax
xor %rdi,%rax
# qhasm: r0 += r1Sigma0
# asm 1: add <r1Sigma0=int64#7,<r0=int64#2
# asm 2: add <r1Sigma0=%rax,<r0=%rsi
add %rax,%rsi
# qhasm: i = 4
# asm 1: mov $4,>i=int64#1
# asm 2: mov $4,>i=%rdi
mov $4,%rdi
# qhasm: innerloop:
._innerloop:
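# note: ._innerloop runs i = 4 times; each pass appears to expand 16 new
# message words with sigma0/sigma1 (the X*/W* vector work) while consuming 16
# rounds' worth of precomputed w+constant values, so together with the rounds
# outside the loop this covers all 80 SHA-512 rounds.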
# qhasm: X1 = mem256[&w + 8]
# asm 1: vmovupd <w=stack1280#1,>X1=reg256#6
# asm 2: vmovupd <w=168(%rsp),>X1=%ymm5
vmovupd 168(%rsp),%ymm5
# qhasm: 4x X1right1 = X1 unsigned>> 1
# asm 1: vpsrlq $1,<X1=reg256#6,>X1right1=reg256#7
# asm 2: vpsrlq $1,<X1=%ymm5,>X1right1=%ymm6
vpsrlq $1,%ymm5,%ymm6
# qhasm: r4Sigma1 = r4>>>14
# asm 1: rorx $14,<r4=int64#9,>r4Sigma1=int64#7
# asm 2: rorx $14,<r4=%r11,>r4Sigma1=%rax
rorx $14,%r11,%rax
# qhasm: r7 += wc891011[0]
# asm 1: addq <wc891011=stack256#3,<r7=int64#11
# asm 2: addq <wc891011=64(%rsp),<r7=%r13
addq 64(%rsp),%r13
# qhasm: 4x X1left63 = X1 << 63
# asm 1: vpsllq $63,<X1=reg256#6,>X1left63=reg256#8
# asm 2: vpsllq $63,<X1=%ymm5,>X1left63=%ymm7
vpsllq $63,%ymm5,%ymm7
# qhasm: ch7 = r6
# asm 1: mov <r6=int64#10,>ch7=int64#12
# asm 2: mov <r6=%r12,>ch7=%r14
mov %r12,%r14
# qhasm: ch7 ^= r5
# asm 1: xor <r5=int64#8,<ch7=int64#12
# asm 2: xor <r5=%r10,<ch7=%r14
xor %r10,%r14
# qhasm: r418 = r4>>>18
# asm 1: rorx $18,<r4=int64#9,>r418=int64#13
# asm 2: rorx $18,<r4=%r11,>r418=%r15
rorx $18,%r11,%r15
# qhasm: ch7 &= r4
# asm 1: and <r4=int64#9,<ch7=int64#12
# asm 2: and <r4=%r11,<ch7=%r14
and %r11,%r14
# qhasm: maj6 = r1
# asm 1: mov <r1=int64#4,>maj6=int64#14
# asm 2: mov <r1=%rcx,>maj6=%rbx
mov %rcx,%rbx
# qhasm: maj6 ^= r0
# asm 1: xor <r0=int64#2,<maj6=int64#14
# asm 2: xor <r0=%rsi,<maj6=%rbx
xor %rsi,%rbx
# qhasm: W14 = mem128[&w + 112],0
# asm 1: vmovupd <w=stack1280#1,>W14=reg256#9%128
# asm 2: vmovupd <w=272(%rsp),>W14=%xmm8
vmovupd 272(%rsp),%xmm8
# qhasm: r441 = r4>>>41
# asm 1: rorx $41,<r4=int64#9,>r441=int64#15
# asm 2: rorx $41,<r4=%r11,>r441=%rbp
rorx $41,%r11,%rbp
# qhasm: r4Sigma1 ^= r418
# asm 1: xor <r418=int64#13,<r4Sigma1=int64#7
# asm 2: xor <r418=%r15,<r4Sigma1=%rax
xor %r15,%rax
# qhasm: ch7 ^= r6
# asm 1: xor <r6=int64#10,<ch7=int64#12
# asm 2: xor <r6=%r12,<ch7=%r14
xor %r12,%r14
# qhasm: X1sigma0 = X1right1 ^ X1left63
# asm 1: vpxor <X1right1=reg256#7,<X1left63=reg256#8,>X1sigma0=reg256#7
# asm 2: vpxor <X1right1=%ymm6,<X1left63=%ymm7,>X1sigma0=%ymm6
vpxor %ymm6,%ymm7,%ymm6
# qhasm: 4x X1right8 = X1 unsigned>> 8
# asm 1: vpsrlq $8,<X1=reg256#6,>X1right8=reg256#8
# asm 2: vpsrlq $8,<X1=%ymm5,>X1right8=%ymm7
vpsrlq $8,%ymm5,%ymm7
# qhasm: r4Sigma1 ^= r441
# asm 1: xor <r441=int64#15,<r4Sigma1=int64#7
# asm 2: xor <r441=%rbp,<r4Sigma1=%rax
xor %rbp,%rax
# qhasm: r7 += ch7
# asm 1: add <ch7=int64#12,<r7=int64#11
# asm 2: add <ch7=%r14,<r7=%r13
add %r14,%r13
# qhasm: r0Sigma0 = r0>>>28
# asm 1: rorx $28,<r0=int64#2,>r0Sigma0=int64#12
# asm 2: rorx $28,<r0=%rsi,>r0Sigma0=%r14
rorx $28,%rsi,%r14
# qhasm: r034 = r0>>>34
# asm 1: rorx $34,<r0=int64#2,>r034=int64#13
# asm 2: rorx $34,<r0=%rsi,>r034=%r15
rorx $34,%rsi,%r15
# qhasm: X1sigma0 = X1sigma0 ^ X1right8
# asm 1: vpxor <X1sigma0=reg256#7,<X1right8=reg256#8,>X1sigma0=reg256#7
# asm 2: vpxor <X1sigma0=%ymm6,<X1right8=%ymm7,>X1sigma0=%ymm6
vpxor %ymm6,%ymm7,%ymm6
# qhasm: r7 += r4Sigma1
# asm 1: add <r4Sigma1=int64#7,<r7=int64#11
# asm 2: add <r4Sigma1=%rax,<r7=%r13
add %rax,%r13
# qhasm: maj7 = r2
# asm 1: mov <r2=int64#3,>maj7=int64#7
# asm 2: mov <r2=%rdx,>maj7=%rax
mov %rdx,%rax
# qhasm: maj7 &= maj6
# asm 1: and <maj6=int64#14,<maj7=int64#7
# asm 2: and <maj6=%rbx,<maj7=%rax
and %rbx,%rax
# qhasm: r0Sigma0 ^= r034
# asm 1: xor <r034=int64#13,<r0Sigma0=int64#12
# asm 2: xor <r034=%r15,<r0Sigma0=%r14
xor %r15,%r14
# qhasm: 2x,0 W14right19 = W14 unsigned>> 19
# asm 1: vpsrlq $19,<W14=reg256#9%128,>W14right19=reg256#8%128
# asm 2: vpsrlq $19,<W14=%xmm8,>W14right19=%xmm7
vpsrlq $19,%xmm8,%xmm7
# qhasm: r039 = r0>>>39
# asm 1: rorx $39,<r0=int64#2,>r039=int64#13
# asm 2: rorx $39,<r0=%rsi,>r039=%r15
rorx $39,%rsi,%r15
# qhasm: 4x X1left56 = X1 << 56
# asm 1: vpsllq $56,<X1=reg256#6,>X1left56=reg256#10
# asm 2: vpsllq $56,<X1=%ymm5,>X1left56=%ymm9
vpsllq $56,%ymm5,%ymm9
# qhasm: r3 += r7
# asm 1: add <r7=int64#11,<r3=int64#6
# asm 2: add <r7=%r13,<r3=%r9
add %r13,%r9
# qhasm: r0Sigma0 ^= r039
# asm 1: xor <r039=int64#13,<r0Sigma0=int64#12
# asm 2: xor <r039=%r15,<r0Sigma0=%r14
xor %r15,%r14
# qhasm: r6 += wc891011[1]
# asm 1: addq <wc891011=stack256#3,<r6=int64#10
# asm 2: addq <wc891011=72(%rsp),<r6=%r12
addq 72(%rsp),%r12
# qhasm: r0andr1 = r1
# asm 1: mov <r1=int64#4,>r0andr1=int64#13
# asm 2: mov <r1=%rcx,>r0andr1=%r15
mov %rcx,%r15
# qhasm: r0andr1 &= r0
# asm 1: and <r0=int64#2,<r0andr1=int64#13
# asm 2: and <r0=%rsi,<r0andr1=%r15
and %rsi,%r15
# qhasm: 2x,0 W14left45 = W14 << 45
# asm 1: vpsllq $45,<W14=reg256#9%128,>W14left45=reg256#11%128
# asm 2: vpsllq $45,<W14=%xmm8,>W14left45=%xmm10
vpsllq $45,%xmm8,%xmm10
# qhasm: r7 += r0Sigma0
# asm 1: add <r0Sigma0=int64#12,<r7=int64#11
# asm 2: add <r0Sigma0=%r14,<r7=%r13
add %r14,%r13
# qhasm: maj7 ^= r0andr1
# asm 1: xor <r0andr1=int64#13,<maj7=int64#7
# asm 2: xor <r0andr1=%r15,<maj7=%rax
xor %r15,%rax
# qhasm: ch6 = r5
# asm 1: mov <r5=int64#8,>ch6=int64#12
# asm 2: mov <r5=%r10,>ch6=%r14
mov %r10,%r14
# qhasm: ch6 ^= r4
# asm 1: xor <r4=int64#9,<ch6=int64#12
# asm 2: xor <r4=%r11,<ch6=%r14
xor %r11,%r14
# qhasm: 2x,0 W14right61 = W14 unsigned>> 61
# asm 1: vpsrlq $61,<W14=reg256#9%128,>W14right61=reg256#12%128
# asm 2: vpsrlq $61,<W14=%xmm8,>W14right61=%xmm11
vpsrlq $61,%xmm8,%xmm11
# qhasm: r3Sigma1 = r3>>>14
# asm 1: rorx $14,<r3=int64#6,>r3Sigma1=int64#15
# asm 2: rorx $14,<r3=%r9,>r3Sigma1=%rbp
rorx $14,%r9,%rbp
# qhasm: X1sigma0 = X1sigma0 ^ X1left56
# asm 1: vpxor <X1sigma0=reg256#7,<X1left56=reg256#10,>X1sigma0=reg256#7
# asm 2: vpxor <X1sigma0=%ymm6,<X1left56=%ymm9,>X1sigma0=%ymm6
vpxor %ymm6,%ymm9,%ymm6
# qhasm: r7 += maj7
# asm 1: add <maj7=int64#7,<r7=int64#11
# asm 2: add <maj7=%rax,<r7=%r13
add %rax,%r13
# qhasm: r318 = r3>>>18
# asm 1: rorx $18,<r3=int64#6,>r318=int64#7
# asm 2: rorx $18,<r3=%r9,>r318=%rax
rorx $18,%r9,%rax
# qhasm: 4x X1right7 = X1 unsigned>> 7
# asm 1: vpsrlq $7,<X1=reg256#6,>X1right7=reg256#6
# asm 2: vpsrlq $7,<X1=%ymm5,>X1right7=%ymm5
vpsrlq $7,%ymm5,%ymm5
# qhasm: 1x,0 W14sigma1 = W14right19 ^ W14left45
# asm 1: vpxor <W14right19=reg256#8%128,<W14left45=reg256#11%128,>W14sigma1=reg256#8%128
# asm 2: vpxor <W14right19=%xmm7,<W14left45=%xmm10,>W14sigma1=%xmm7
vpxor %xmm7,%xmm10,%xmm7
# qhasm: ch6 &= r3
# asm 1: and <r3=int64#6,<ch6=int64#12
# asm 2: and <r3=%r9,<ch6=%r14
and %r9,%r14
# qhasm: r3Sigma1 ^= r318
# asm 1: xor <r318=int64#7,<r3Sigma1=int64#15
# asm 2: xor <r318=%rax,<r3Sigma1=%rbp
xor %rax,%rbp
# qhasm: r341 = r3>>>41
# asm 1: rorx $41,<r3=int64#6,>r341=int64#7
# asm 2: rorx $41,<r3=%r9,>r341=%rax
rorx $41,%r9,%rax
# qhasm: maj6 &= r7
# asm 1: and <r7=int64#11,<maj6=int64#14
# asm 2: and <r7=%r13,<maj6=%rbx
and %r13,%rbx
# qhasm: X1sigma0 = X1sigma0 ^ X1right7
# asm 1: vpxor <X1sigma0=reg256#7,<X1right7=reg256#6,>X1sigma0=reg256#6
# asm 2: vpxor <X1sigma0=%ymm6,<X1right7=%ymm5,>X1sigma0=%ymm5
vpxor %ymm6,%ymm5,%ymm5
# qhasm: 1x,0 W14sigma1 ^= W14right61
# asm 1: vpxor <W14right61=reg256#12%128,<W14sigma1=reg256#8%128,<W14sigma1=reg256#8%128
# asm 2: vpxor <W14right61=%xmm11,<W14sigma1=%xmm7,<W14sigma1=%xmm7
vpxor %xmm11,%xmm7,%xmm7
# qhasm: 4x X0 = X0 + X1sigma0
# asm 1: vpaddq <X0=reg256#2,<X1sigma0=reg256#6,>X0=reg256#2
# asm 2: vpaddq <X0=%ymm1,<X1sigma0=%ymm5,>X0=%ymm1
vpaddq %ymm1,%ymm5,%ymm1
# qhasm: r3Sigma1 ^= r341
# asm 1: xor <r341=int64#7,<r3Sigma1=int64#15
# asm 2: xor <r341=%rax,<r3Sigma1=%rbp
xor %rax,%rbp
# qhasm: maj6 ^= r0andr1
# asm 1: xor <r0andr1=int64#13,<maj6=int64#14
# asm 2: xor <r0andr1=%r15,<maj6=%rbx
xor %r15,%rbx
# qhasm: 2x,0 W14left3 = W14 << 3
# asm 1: vpsllq $3,<W14=reg256#9%128,>W14left3=reg256#6%128
# asm 2: vpsllq $3,<W14=%xmm8,>W14left3=%xmm5
vpsllq $3,%xmm8,%xmm5
# qhasm: r7Sigma0 = r7>>>28
# asm 1: rorx $28,<r7=int64#11,>r7Sigma0=int64#7
# asm 2: rorx $28,<r7=%r13,>r7Sigma0=%rax
rorx $28,%r13,%rax
# qhasm: ch6 ^= r5
# asm 1: xor <r5=int64#8,<ch6=int64#12
# asm 2: xor <r5=%r10,<ch6=%r14
xor %r10,%r14
# qhasm: r6 += r3Sigma1
# asm 1: add <r3Sigma1=int64#15,<r6=int64#10
# asm 2: add <r3Sigma1=%rbp,<r6=%r12
add %rbp,%r12
# qhasm: 4x X0 = X0 + mem256[&w + 72]
# asm 1: vpaddq <w=stack1280#1,<X0=reg256#2,>X0=reg256#2
# asm 2: vpaddq <w=232(%rsp),<X0=%ymm1,>X0=%ymm1
vpaddq 232(%rsp),%ymm1,%ymm1
# qhasm: r734 = r7>>>34
# asm 1: rorx $34,<r7=int64#11,>r734=int64#13
# asm 2: rorx $34,<r7=%r13,>r734=%r15
rorx $34,%r13,%r15
# qhasm: r5 += wc891011[2]
# asm 1: addq <wc891011=stack256#3,<r5=int64#8
# asm 2: addq <wc891011=80(%rsp),<r5=%r10
addq 80(%rsp),%r10
# qhasm: r6 += ch6
# asm 1: add <ch6=int64#12,<r6=int64#10
# asm 2: add <ch6=%r14,<r6=%r12
add %r14,%r12
# qhasm: r7Sigma0 ^= r734
# asm 1: xor <r734=int64#13,<r7Sigma0=int64#7
# asm 2: xor <r734=%r15,<r7Sigma0=%rax
xor %r15,%rax
# qhasm: r739 = r7>>>39
# asm 1: rorx $39,<r7=int64#11,>r739=int64#12
# asm 2: rorx $39,<r7=%r13,>r739=%r14
rorx $39,%r13,%r14
# qhasm: r2 += r6
# asm 1: add <r6=int64#10,<r2=int64#3
# asm 2: add <r6=%r12,<r2=%rdx
add %r12,%rdx
# qhasm: 1x,0 W14sigma1 ^= W14left3
# asm 1: vpxor <W14left3=reg256#6%128,<W14sigma1=reg256#8%128,<W14sigma1=reg256#8%128
# asm 2: vpxor <W14left3=%xmm5,<W14sigma1=%xmm7,<W14sigma1=%xmm7
vpxor %xmm5,%xmm7,%xmm7
# qhasm: 2x,0 W14right6 = W14 unsigned>> 6
# asm 1: vpsrlq $6,<W14=reg256#9%128,>W14right6=reg256#6%128
# asm 2: vpsrlq $6,<W14=%xmm8,>W14right6=%xmm5
vpsrlq $6,%xmm8,%xmm5
# qhasm: r7Sigma0 ^= r739
# asm 1: xor <r739=int64#12,<r7Sigma0=int64#7
# asm 2: xor <r739=%r14,<r7Sigma0=%rax
xor %r14,%rax
# qhasm: r6 += maj6
# asm 1: add <maj6=int64#14,<r6=int64#10
# asm 2: add <maj6=%rbx,<r6=%r12
add %rbx,%r12
# qhasm: ch5 = r4
# asm 1: mov <r4=int64#9,>ch5=int64#12
# asm 2: mov <r4=%r11,>ch5=%r14
mov %r11,%r14
# qhasm: ch5 ^= r3
# asm 1: xor <r3=int64#6,<ch5=int64#12
# asm 2: xor <r3=%r9,<ch5=%r14
xor %r9,%r14
# qhasm: r6 += r7Sigma0
# asm 1: add <r7Sigma0=int64#7,<r6=int64#10
# asm 2: add <r7Sigma0=%rax,<r6=%r12
add %rax,%r12
# qhasm: r2Sigma1 = r2>>>14
# asm 1: rorx $14,<r2=int64#3,>r2Sigma1=int64#7
# asm 2: rorx $14,<r2=%rdx,>r2Sigma1=%rax
rorx $14,%rdx,%rax
# qhasm: 1x,0 W14sigma1 ^= W14right6
# asm 1: vpxor <W14right6=reg256#6%128,<W14sigma1=reg256#8%128,<W14sigma1=reg256#8%128
# asm 2: vpxor <W14right6=%xmm5,<W14sigma1=%xmm7,<W14sigma1=%xmm7
vpxor %xmm5,%xmm7,%xmm7
# qhasm: ch5 &= r2
# asm 1: and <r2=int64#3,<ch5=int64#12
# asm 2: and <r2=%rdx,<ch5=%r14
and %rdx,%r14
# qhasm: r218 = r2>>>18
# asm 1: rorx $18,<r2=int64#3,>r218=int64#13
# asm 2: rorx $18,<r2=%rdx,>r218=%r15
rorx $18,%rdx,%r15
# qhasm: r241 = r2>>>41
# asm 1: rorx $41,<r2=int64#3,>r241=int64#14
# asm 2: rorx $41,<r2=%rdx,>r241=%rbx
rorx $41,%rdx,%rbx
# qhasm: 4x X0 = W14sigma1 + X0
# asm 1: vpaddq <W14sigma1=reg256#8,<X0=reg256#2,>X0=reg256#2
# asm 2: vpaddq <W14sigma1=%ymm7,<X0=%ymm1,>X0=%ymm1
vpaddq %ymm7,%ymm1,%ymm1
# qhasm: ch5 ^= r4
# asm 1: xor <r4=int64#9,<ch5=int64#12
# asm 2: xor <r4=%r11,<ch5=%r14
xor %r11,%r14
# qhasm: r2Sigma1 ^= r218
# asm 1: xor <r218=int64#13,<r2Sigma1=int64#7
# asm 2: xor <r218=%r15,<r2Sigma1=%rax
xor %r15,%rax
# qhasm: 2x,0 W0right19 = X0 unsigned>> 19
# asm 1: vpsrlq $19,<X0=reg256#2%128,>W0right19=reg256#6%128
# asm 2: vpsrlq $19,<X0=%xmm1,>W0right19=%xmm5
vpsrlq $19,%xmm1,%xmm5
# qhasm: r6Sigma0 = r6>>>28
# asm 1: rorx $28,<r6=int64#10,>r6Sigma0=int64#13
# asm 2: rorx $28,<r6=%r12,>r6Sigma0=%r15
rorx $28,%r12,%r15
# qhasm: r5 += ch5
# asm 1: add <ch5=int64#12,<r5=int64#8
# asm 2: add <ch5=%r14,<r5=%r10
add %r14,%r10
# qhasm: 2x,0 W0left45 = X0 << 45
# asm 1: vpsllq $45,<X0=reg256#2%128,>W0left45=reg256#7%128
# asm 2: vpsllq $45,<X0=%xmm1,>W0left45=%xmm6
vpsllq $45,%xmm1,%xmm6
# qhasm: r634 = r6>>>34
# asm 1: rorx $34,<r6=int64#10,>r634=int64#12
# asm 2: rorx $34,<r6=%r12,>r634=%r14
rorx $34,%r12,%r14
# qhasm: r2Sigma1 ^= r241
# asm 1: xor <r241=int64#14,<r2Sigma1=int64#7
# asm 2: xor <r241=%rbx,<r2Sigma1=%rax
xor %rbx,%rax
# qhasm: maj4 = r7
# asm 1: mov <r7=int64#11,>maj4=int64#14
# asm 2: mov <r7=%r13,>maj4=%rbx
mov %r13,%rbx
# qhasm: X5 = mem256[&w + 40]
# asm 1: vmovupd <w=stack1280#1,>X5=reg256#8
# asm 2: vmovupd <w=200(%rsp),>X5=%ymm7
vmovupd 200(%rsp),%ymm7
# qhasm: maj4 ^= r6
# asm 1: xor <r6=int64#10,<maj4=int64#14
# asm 2: xor <r6=%r12,<maj4=%rbx
xor %r12,%rbx
# qhasm: 2x,0 W0right61 = X0 unsigned>> 61
# asm 1: vpsrlq $61,<X0=reg256#2%128,>W0right61=reg256#9%128
# asm 2: vpsrlq $61,<X0=%xmm1,>W0right61=%xmm8
vpsrlq $61,%xmm1,%xmm8
# qhasm: 1x,0 W0sigma1 = W0right19 ^ W0left45
# asm 1: vpxor <W0right19=reg256#6%128,<W0left45=reg256#7%128,>W0sigma1=reg256#6%128
# asm 2: vpxor <W0right19=%xmm5,<W0left45=%xmm6,>W0sigma1=%xmm5
vpxor %xmm5,%xmm6,%xmm5
# qhasm: r6Sigma0 ^= r634
# asm 1: xor <r634=int64#12,<r6Sigma0=int64#13
# asm 2: xor <r634=%r14,<r6Sigma0=%r15
xor %r14,%r15
# qhasm: 1x,0 W0sigma1 ^= W0right61
# asm 1: vpxor <W0right61=reg256#9%128,<W0sigma1=reg256#6%128,<W0sigma1=reg256#6%128
# asm 2: vpxor <W0right61=%xmm8,<W0sigma1=%xmm5,<W0sigma1=%xmm5
vpxor %xmm8,%xmm5,%xmm5
# qhasm: r639 = r6>>>39
# asm 1: rorx $39,<r6=int64#10,>r639=int64#12
# asm 2: rorx $39,<r6=%r12,>r639=%r14
rorx $39,%r12,%r14
# qhasm: 2x,0 W0left3 = X0 << 3
# asm 1: vpsllq $3,<X0=reg256#2%128,>W0left3=reg256#7%128
# asm 2: vpsllq $3,<X0=%xmm1,>W0left3=%xmm6
vpsllq $3,%xmm1,%xmm6
# qhasm: r6Sigma0 ^= r639
# asm 1: xor <r639=int64#12,<r6Sigma0=int64#13
# asm 2: xor <r639=%r14,<r6Sigma0=%r15
xor %r14,%r15
# qhasm: r5 += r2Sigma1
# asm 1: add <r2Sigma1=int64#7,<r5=int64#8
# asm 2: add <r2Sigma1=%rax,<r5=%r10
add %rax,%r10
# qhasm: 2x,0 W0right6 = X0 unsigned>> 6
# asm 1: vpsrlq $6,<X0=reg256#2%128,>W0right6=reg256#9%128
# asm 2: vpsrlq $6,<X0=%xmm1,>W0right6=%xmm8
vpsrlq $6,%xmm1,%xmm8
# qhasm: 1x,0 W0sigma1 ^= W0left3
# asm 1: vpxor <W0left3=reg256#7%128,<W0sigma1=reg256#6%128,<W0sigma1=reg256#6%128
# asm 2: vpxor <W0left3=%xmm6,<W0sigma1=%xmm5,<W0sigma1=%xmm5
vpxor %xmm6,%xmm5,%xmm5
# qhasm: r1 += r5
# asm 1: add <r5=int64#8,<r1=int64#4
# asm 2: add <r5=%r10,<r1=%rcx
add %r10,%rcx
# qhasm: r6andr7 = r7
# asm 1: mov <r7=int64#11,>r6andr7=int64#7
# asm 2: mov <r7=%r13,>r6andr7=%rax
mov %r13,%rax
# qhasm: r6andr7 &= r6
# asm 1: and <r6=int64#10,<r6andr7=int64#7
# asm 2: and <r6=%r12,<r6andr7=%rax
and %r12,%rax
# qhasm: r1Sigma1 = r1>>>14
# asm 1: rorx $14,<r1=int64#4,>r1Sigma1=int64#12
# asm 2: rorx $14,<r1=%rcx,>r1Sigma1=%r14
rorx $14,%rcx,%r14
# qhasm: r5 += r6Sigma0
# asm 1: add <r6Sigma0=int64#13,<r5=int64#8
# asm 2: add <r6Sigma0=%r15,<r5=%r10
add %r15,%r10
# qhasm: 1x,0 W0sigma1 ^= W0right6
# asm 1: vpxor <W0right6=reg256#9%128,<W0sigma1=reg256#6%128,<W0sigma1=reg256#6%128
# asm 2: vpxor <W0right6=%xmm8,<W0sigma1=%xmm5,<W0sigma1=%xmm5
vpxor %xmm8,%xmm5,%xmm5
# qhasm: maj5 = r0
# asm 1: mov <r0=int64#2,>maj5=int64#13
# asm 2: mov <r0=%rsi,>maj5=%r15
mov %rsi,%r15
# qhasm: maj5 &= maj4
# asm 1: and <maj4=int64#14,<maj5=int64#13
# asm 2: and <maj4=%rbx,<maj5=%r15
and %rbx,%r15
# qhasm: W0sigma1 = W0sigma1[2,3,0,1]
# asm 1: vpermq $0x4e,<W0sigma1=reg256#6,>W0sigma1=reg256#6
# asm 2: vpermq $0x4e,<W0sigma1=%ymm5,>W0sigma1=%ymm5
vpermq $0x4e,%ymm5,%ymm5
# qhasm: maj5 ^= r6andr7
# asm 1: xor <r6andr7=int64#7,<maj5=int64#13
# asm 2: xor <r6andr7=%rax,<maj5=%r15
xor %rax,%r15
# qhasm: ch4 = r3
# asm 1: mov <r3=int64#6,>ch4=int64#15
# asm 2: mov <r3=%r9,>ch4=%rbp
mov %r9,%rbp
# qhasm: 4x X5right1 = X5 unsigned>> 1
# asm 1: vpsrlq $1,<X5=reg256#8,>X5right1=reg256#7
# asm 2: vpsrlq $1,<X5=%ymm7,>X5right1=%ymm6
vpsrlq $1,%ymm7,%ymm6
# qhasm: ch4 ^= r2
# asm 1: xor <r2=int64#3,<ch4=int64#15
# asm 2: xor <r2=%rdx,<ch4=%rbp
xor %rdx,%rbp
# qhasm: r5 += maj5
# asm 1: add <maj5=int64#13,<r5=int64#8
# asm 2: add <maj5=%r15,<r5=%r10
add %r15,%r10
# qhasm: r118 = r1>>>18
# asm 1: rorx $18,<r1=int64#4,>r118=int64#13
# asm 2: rorx $18,<r1=%rcx,>r118=%r15
rorx $18,%rcx,%r15
# qhasm: ch4 &= r1
# asm 1: and <r1=int64#4,<ch4=int64#15
# asm 2: and <r1=%rcx,<ch4=%rbp
and %rcx,%rbp
# qhasm: maj4 &= r5
# asm 1: and <r5=int64#8,<maj4=int64#14
# asm 2: and <r5=%r10,<maj4=%rbx
and %r10,%rbx
# qhasm: ch4 ^= r3
# asm 1: xor <r3=int64#6,<ch4=int64#15
# asm 2: xor <r3=%r9,<ch4=%rbp
xor %r9,%rbp
# qhasm: r4 += wc891011[3]
# asm 1: addq <wc891011=stack256#3,<r4=int64#9
# asm 2: addq <wc891011=88(%rsp),<r4=%r11
addq 88(%rsp),%r11
# qhasm: r1Sigma1 ^= r118
# asm 1: xor <r118=int64#13,<r1Sigma1=int64#12
# asm 2: xor <r118=%r15,<r1Sigma1=%r14
xor %r15,%r14
# qhasm: r141 = r1>>>41
# asm 1: rorx $41,<r1=int64#4,>r141=int64#13
# asm 2: rorx $41,<r1=%rcx,>r141=%r15
rorx $41,%rcx,%r15
# qhasm: 4x X0 = X0 + W0sigma1
# asm 1: vpaddq <X0=reg256#2,<W0sigma1=reg256#6,>X0=reg256#2
# asm 2: vpaddq <X0=%ymm1,<W0sigma1=%ymm5,>X0=%ymm1
vpaddq %ymm1,%ymm5,%ymm1
# qhasm: r4 += ch4
# asm 1: add <ch4=int64#15,<r4=int64#9
# asm 2: add <ch4=%rbp,<r4=%r11
add %rbp,%r11
# qhasm: maj4 ^= r6andr7
# asm 1: xor <r6andr7=int64#7,<maj4=int64#14
# asm 2: xor <r6andr7=%rax,<maj4=%rbx
xor %rax,%rbx
# qhasm: r5Sigma0 = r5>>>28
# asm 1: rorx $28,<r5=int64#8,>r5Sigma0=int64#7
# asm 2: rorx $28,<r5=%r10,>r5Sigma0=%rax
rorx $28,%r10,%rax
# qhasm: 4x D0 = X0 + mem256[constants + 128]
# asm 1: vpaddq 128(<constants=int64#5),<X0=reg256#2,>D0=reg256#6
# asm 2: vpaddq 128(<constants=%r8),<X0=%ymm1,>D0=%ymm5
vpaddq 128(%r8),%ymm1,%ymm5
# qhasm: r1Sigma1 ^= r141
# asm 1: xor <r141=int64#13,<r1Sigma1=int64#12
# asm 2: xor <r141=%r15,<r1Sigma1=%r14
xor %r15,%r14
# qhasm: r534 = r5>>>34
# asm 1: rorx $34,<r5=int64#8,>r534=int64#13
# asm 2: rorx $34,<r5=%r10,>r534=%r15
rorx $34,%r10,%r15
# qhasm: mem256[&w + 128] = X0
# asm 1: vmovupd <X0=reg256#2,<w=stack1280#1
# asm 2: vmovupd <X0=%ymm1,<w=288(%rsp)
vmovupd %ymm1,288(%rsp)
# qhasm: r4 += r1Sigma1
# asm 1: add <r1Sigma1=int64#12,<r4=int64#9
# asm 2: add <r1Sigma1=%r14,<r4=%r11
add %r14,%r11
# qhasm: r5Sigma0 ^= r534
# asm 1: xor <r534=int64#13,<r5Sigma0=int64#7
# asm 2: xor <r534=%r15,<r5Sigma0=%rax
xor %r15,%rax
# qhasm: r539 = r5>>>39
# asm 1: rorx $39,<r5=int64#8,>r539=int64#12
# asm 2: rorx $39,<r5=%r10,>r539=%r14
rorx $39,%r10,%r14
# qhasm: r3 += wc12131415[0]
# asm 1: addq <wc12131415=stack256#5,<r3=int64#6
# asm 2: addq <wc12131415=128(%rsp),<r3=%r9
addq 128(%rsp),%r9
# qhasm: mem256[&w + 0] = X0
# asm 1: vmovupd <X0=reg256#2,<w=stack1280#1
# asm 2: vmovupd <X0=%ymm1,<w=160(%rsp)
vmovupd %ymm1,160(%rsp)
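# note: the four freshly expanded words are stored both at &w+128 and back at
# &w+0, which appears to keep the sliding 16-word window contiguous so that
# later overlapping loads such as &w+8 never have to wrap around.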
# qhasm: r0 += r4
# asm 1: add <r4=int64#9,<r0=int64#2
# asm 2: add <r4=%r11,<r0=%rsi
add %r11,%rsi
# qhasm: r4 += maj4
# asm 1: add <maj4=int64#14,<r4=int64#9
# asm 2: add <maj4=%rbx,<r4=%r11
add %rbx,%r11
# qhasm: r5Sigma0 ^= r539
# asm 1: xor <r539=int64#12,<r5Sigma0=int64#7
# asm 2: xor <r539=%r14,<r5Sigma0=%rax
xor %r14,%rax
# qhasm: constants += 128
# asm 1: add $128,<constants=int64#5
# asm 2: add $128,<constants=%r8
add $128,%r8
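# note: the constants pointer advances 128 bytes (16 round constants) per
# inner-loop pass; across the four passes this walks through the remaining
# 64 of the 80 round constants.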
# qhasm: wc0123 = D0
# asm 1: vmovapd <D0=reg256#6,>wc0123=stack256#3
# asm 2: vmovapd <D0=%ymm5,>wc0123=64(%rsp)
vmovapd %ymm5,64(%rsp)
# qhasm: W2 = mem128[&w + 16],0
# asm 1: vmovupd <w=stack1280#1,>W2=reg256#6%128
# asm 2: vmovupd <w=176(%rsp),>W2=%xmm5
vmovupd 176(%rsp),%xmm5
# qhasm: r4 += r5Sigma0
# asm 1: add <r5Sigma0=int64#7,<r4=int64#9
# asm 2: add <r5Sigma0=%rax,<r4=%r11
add %rax,%r11
# qhasm: r0Sigma1 = r0>>>14
# asm 1: rorx $14,<r0=int64#2,>r0Sigma1=int64#7
# asm 2: rorx $14,<r0=%rsi,>r0Sigma1=%rax
rorx $14,%rsi,%rax
# qhasm: ch3 = r2
# asm 1: mov <r2=int64#3,>ch3=int64#12
# asm 2: mov <r2=%rdx,>ch3=%r14
mov %rdx,%r14
# qhasm: ch3 ^= r1
# asm 1: xor <r1=int64#4,<ch3=int64#12
# asm 2: xor <r1=%rcx,<ch3=%r14
xor %rcx,%r14
# qhasm: 4x X5left63 = X5 << 63
# asm 1: vpsllq $63,<X5=reg256#8,>X5left63=reg256#9
# asm 2: vpsllq $63,<X5=%ymm7,>X5left63=%ymm8
vpsllq $63,%ymm7,%ymm8
# qhasm: r018 = r0>>>18
# asm 1: rorx $18,<r0=int64#2,>r018=int64#13
# asm 2: rorx $18,<r0=%rsi,>r018=%r15
rorx $18,%rsi,%r15
# qhasm: maj2 = r5
# asm 1: mov <r5=int64#8,>maj2=int64#14
# asm 2: mov <r5=%r10,>maj2=%rbx
mov %r10,%rbx
# qhasm: ch3 &= r0
# asm 1: and <r0=int64#2,<ch3=int64#12
# asm 2: and <r0=%rsi,<ch3=%r14
and %rsi,%r14
# qhasm: maj2 ^= r4
# asm 1: xor <r4=int64#9,<maj2=int64#14
# asm 2: xor <r4=%r11,<maj2=%rbx
xor %r11,%rbx
# qhasm: r041 = r0>>>41
# asm 1: rorx $41,<r0=int64#2,>r041=int64#15
# asm 2: rorx $41,<r0=%rsi,>r041=%rbp
rorx $41,%rsi,%rbp
# qhasm: X5sigma0 = X5right1 ^ X5left63
# asm 1: vpxor <X5right1=reg256#7,<X5left63=reg256#9,>X5sigma0=reg256#7
# asm 2: vpxor <X5right1=%ymm6,<X5left63=%ymm8,>X5sigma0=%ymm6
vpxor %ymm6,%ymm8,%ymm6
# qhasm: r0Sigma1 ^= r018
# asm 1: xor <r018=int64#13,<r0Sigma1=int64#7
# asm 2: xor <r018=%r15,<r0Sigma1=%rax
xor %r15,%rax
# qhasm: ch3 ^= r2
# asm 1: xor <r2=int64#3,<ch3=int64#12
# asm 2: xor <r2=%rdx,<ch3=%r14
xor %rdx,%r14
# qhasm: 4x X5right8 = X5 unsigned>> 8
# asm 1: vpsrlq $8,<X5=reg256#8,>X5right8=reg256#9
# asm 2: vpsrlq $8,<X5=%ymm7,>X5right8=%ymm8
vpsrlq $8,%ymm7,%ymm8
# qhasm: r4Sigma0 = r4>>>28
# asm 1: rorx $28,<r4=int64#9,>r4Sigma0=int64#13
# asm 2: rorx $28,<r4=%r11,>r4Sigma0=%r15
rorx $28,%r11,%r15
# qhasm: r0Sigma1 ^= r041
# asm 1: xor <r041=int64#15,<r0Sigma1=int64#7
# asm 2: xor <r041=%rbp,<r0Sigma1=%rax
xor %rbp,%rax
# qhasm: r3 += ch3
# asm 1: add <ch3=int64#12,<r3=int64#6
# asm 2: add <ch3=%r14,<r3=%r9
add %r14,%r9
# qhasm: r434 = r4>>>34
# asm 1: rorx $34,<r4=int64#9,>r434=int64#12
# asm 2: rorx $34,<r4=%r11,>r434=%r14
rorx $34,%r11,%r14
# qhasm: X5sigma0 = X5sigma0 ^ X5right8
# asm 1: vpxor <X5sigma0=reg256#7,<X5right8=reg256#9,>X5sigma0=reg256#7
# asm 2: vpxor <X5sigma0=%ymm6,<X5right8=%ymm8,>X5sigma0=%ymm6
vpxor %ymm6,%ymm8,%ymm6
# qhasm: r3 += r0Sigma1
# asm 1: add <r0Sigma1=int64#7,<r3=int64#6
# asm 2: add <r0Sigma1=%rax,<r3=%r9
add %rax,%r9
# qhasm: maj3 = r6
# asm 1: mov <r6=int64#10,>maj3=int64#7
# asm 2: mov <r6=%r12,>maj3=%rax
mov %r12,%rax
# qhasm: maj3 &= maj2
# asm 1: and <maj2=int64#14,<maj3=int64#7
# asm 2: and <maj2=%rbx,<maj3=%rax
and %rbx,%rax
# qhasm: r4Sigma0 ^= r434
# asm 1: xor <r434=int64#12,<r4Sigma0=int64#13
# asm 2: xor <r434=%r14,<r4Sigma0=%r15
xor %r14,%r15
# qhasm: r439 = r4>>>39
# asm 1: rorx $39,<r4=int64#9,>r439=int64#12
# asm 2: rorx $39,<r4=%r11,>r439=%r14
rorx $39,%r11,%r14
# qhasm: 2x,0 W2right19 = W2 unsigned>> 19
# asm 1: vpsrlq $19,<W2=reg256#6%128,>W2right19=reg256#9%128
# asm 2: vpsrlq $19,<W2=%xmm5,>W2right19=%xmm8
vpsrlq $19,%xmm5,%xmm8
# qhasm: 4x X5left56 = X5 << 56
# asm 1: vpsllq $56,<X5=reg256#8,>X5left56=reg256#10
# asm 2: vpsllq $56,<X5=%ymm7,>X5left56=%ymm9
vpsllq $56,%ymm7,%ymm9
# qhasm: r7 += r3
# asm 1: add <r3=int64#6,<r7=int64#11
# asm 2: add <r3=%r9,<r7=%r13
add %r9,%r13
# qhasm: r4Sigma0 ^= r439
# asm 1: xor <r439=int64#12,<r4Sigma0=int64#13
# asm 2: xor <r439=%r14,<r4Sigma0=%r15
xor %r14,%r15
# qhasm: r2 += wc12131415[1]
# asm 1: addq <wc12131415=stack256#5,<r2=int64#3
# asm 2: addq <wc12131415=136(%rsp),<r2=%rdx
addq 136(%rsp),%rdx
# qhasm: r4andr5 = r5
# asm 1: mov <r5=int64#8,>r4andr5=int64#12
# asm 2: mov <r5=%r10,>r4andr5=%r14
mov %r10,%r14
# qhasm: r4andr5 &= r4
# asm 1: and <r4=int64#9,<r4andr5=int64#12
# asm 2: and <r4=%r11,<r4andr5=%r14
and %r11,%r14
# qhasm: 2x,0 W2left45 = W2 << 45
# asm 1: vpsllq $45,<W2=reg256#6%128,>W2left45=reg256#11%128
# asm 2: vpsllq $45,<W2=%xmm5,>W2left45=%xmm10
vpsllq $45,%xmm5,%xmm10
# qhasm: r3 += r4Sigma0
# asm 1: add <r4Sigma0=int64#13,<r3=int64#6
# asm 2: add <r4Sigma0=%r15,<r3=%r9
add %r15,%r9
# qhasm: maj3 ^= r4andr5
# asm 1: xor <r4andr5=int64#12,<maj3=int64#7
# asm 2: xor <r4andr5=%r14,<maj3=%rax
xor %r14,%rax
# qhasm: ch2 = r1
# asm 1: mov <r1=int64#4,>ch2=int64#13
# asm 2: mov <r1=%rcx,>ch2=%r15
mov %rcx,%r15
# qhasm: ch2 ^= r0
# asm 1: xor <r0=int64#2,<ch2=int64#13
# asm 2: xor <r0=%rsi,<ch2=%r15
xor %rsi,%r15
# qhasm: 2x,0 W2right61 = W2 unsigned>> 61
# asm 1: vpsrlq $61,<W2=reg256#6%128,>W2right61=reg256#12%128
# asm 2: vpsrlq $61,<W2=%xmm5,>W2right61=%xmm11
vpsrlq $61,%xmm5,%xmm11
# qhasm: r7Sigma1 = r7>>>14
# asm 1: rorx $14,<r7=int64#11,>r7Sigma1=int64#15
# asm 2: rorx $14,<r7=%r13,>r7Sigma1=%rbp
rorx $14,%r13,%rbp
# qhasm: X5sigma0 = X5sigma0 ^ X5left56
# asm 1: vpxor <X5sigma0=reg256#7,<X5left56=reg256#10,>X5sigma0=reg256#7
# asm 2: vpxor <X5sigma0=%ymm6,<X5left56=%ymm9,>X5sigma0=%ymm6
vpxor %ymm6,%ymm9,%ymm6
# qhasm: r3 += maj3
# asm 1: add <maj3=int64#7,<r3=int64#6
# asm 2: add <maj3=%rax,<r3=%r9
add %rax,%r9
# qhasm: 4x X5right7 = X5 unsigned>> 7
# asm 1: vpsrlq $7,<X5=reg256#8,>X5right7=reg256#8
# asm 2: vpsrlq $7,<X5=%ymm7,>X5right7=%ymm7
vpsrlq $7,%ymm7,%ymm7
# qhasm: r718 = r7>>>18
# asm 1: rorx $18,<r7=int64#11,>r718=int64#7
# asm 2: rorx $18,<r7=%r13,>r718=%rax
rorx $18,%r13,%rax
# qhasm: 1x,0 W2sigma1 = W2right19 ^ W2left45
# asm 1: vpxor <W2right19=reg256#9%128,<W2left45=reg256#11%128,>W2sigma1=reg256#9%128
# asm 2: vpxor <W2right19=%xmm8,<W2left45=%xmm10,>W2sigma1=%xmm8
vpxor %xmm8,%xmm10,%xmm8
# qhasm: ch2 &= r7
# asm 1: and <r7=int64#11,<ch2=int64#13
# asm 2: and <r7=%r13,<ch2=%r15
and %r13,%r15
# qhasm: r7Sigma1 ^= r718
# asm 1: xor <r718=int64#7,<r7Sigma1=int64#15
# asm 2: xor <r718=%rax,<r7Sigma1=%rbp
xor %rax,%rbp
# qhasm: X5sigma0 = X5sigma0 ^ X5right7
# asm 1: vpxor <X5sigma0=reg256#7,<X5right7=reg256#8,>X5sigma0=reg256#7
# asm 2: vpxor <X5sigma0=%ymm6,<X5right7=%ymm7,>X5sigma0=%ymm6
vpxor %ymm6,%ymm7,%ymm6
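# note: AVX2 has no 64-bit vector rotate, so sigma0(x) = (x>>>1) ^ (x>>>8) ^ (x>>7)
# note: is assembled from shift pairs: (x>>1)^(x<<63) and (x>>8)^(x<<56) give the two
# note: rotations and (x>>7) supplies the plain shift (X5right1/X5left63,
# note: X5right8/X5left56, X5right7 above), four schedule words at a time.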
# qhasm: r741 = r7>>>41
# asm 1: rorx $41,<r7=int64#11,>r741=int64#7
# asm 2: rorx $41,<r7=%r13,>r741=%rax
rorx $41,%r13,%rax
# qhasm: maj2 &= r3
# asm 1: and <r3=int64#6,<maj2=int64#14
# asm 2: and <r3=%r9,<maj2=%rbx
and %r9,%rbx
# qhasm: 1x,0 W2sigma1 ^= W2right61
# asm 1: vpxor <W2right61=reg256#12%128,<W2sigma1=reg256#9%128,<W2sigma1=reg256#9%128
# asm 2: vpxor <W2right61=%xmm11,<W2sigma1=%xmm8,<W2sigma1=%xmm8
vpxor %xmm11,%xmm8,%xmm8
# qhasm: 4x X4 = X4 + X5sigma0
# asm 1: vpaddq <X4=reg256#3,<X5sigma0=reg256#7,>X4=reg256#3
# asm 2: vpaddq <X4=%ymm2,<X5sigma0=%ymm6,>X4=%ymm2
vpaddq %ymm2,%ymm6,%ymm2
# qhasm: r7Sigma1 ^= r741
# asm 1: xor <r741=int64#7,<r7Sigma1=int64#15
# asm 2: xor <r741=%rax,<r7Sigma1=%rbp
xor %rax,%rbp
# qhasm: maj2 ^= r4andr5
# asm 1: xor <r4andr5=int64#12,<maj2=int64#14
# asm 2: xor <r4andr5=%r14,<maj2=%rbx
xor %r14,%rbx
# qhasm: 2x,0 W2left3 = W2 << 3
# asm 1: vpsllq $3,<W2=reg256#6%128,>W2left3=reg256#7%128
# asm 2: vpsllq $3,<W2=%xmm5,>W2left3=%xmm6
vpsllq $3,%xmm5,%xmm6
# qhasm: r3Sigma0 = r3>>>28
# asm 1: rorx $28,<r3=int64#6,>r3Sigma0=int64#7
# asm 2: rorx $28,<r3=%r9,>r3Sigma0=%rax
rorx $28,%r9,%rax
# qhasm: ch2 ^= r1
# asm 1: xor <r1=int64#4,<ch2=int64#13
# asm 2: xor <r1=%rcx,<ch2=%r15
xor %rcx,%r15
# qhasm: r2 += r7Sigma1
# asm 1: add <r7Sigma1=int64#15,<r2=int64#3
# asm 2: add <r7Sigma1=%rbp,<r2=%rdx
add %rbp,%rdx
# qhasm: r334 = r3>>>34
# asm 1: rorx $34,<r3=int64#6,>r334=int64#12
# asm 2: rorx $34,<r3=%r9,>r334=%r14
rorx $34,%r9,%r14
# qhasm: 4x X4 = X4 + mem256[&w + 104]
# asm 1: vpaddq <w=stack1280#1,<X4=reg256#3,>X4=reg256#3
# asm 2: vpaddq <w=264(%rsp),<X4=%ymm2,>X4=%ymm2
vpaddq 264(%rsp),%ymm2,%ymm2
# qhasm: r1 += wc12131415[2]
# asm 1: addq <wc12131415=stack256#5,<r1=int64#4
# asm 2: addq <wc12131415=144(%rsp),<r1=%rcx
addq 144(%rsp),%rcx
# qhasm: r2 += ch2
# asm 1: add <ch2=int64#13,<r2=int64#3
# asm 2: add <ch2=%r15,<r2=%rdx
add %r15,%rdx
# qhasm: r3Sigma0 ^= r334
# asm 1: xor <r334=int64#12,<r3Sigma0=int64#7
# asm 2: xor <r334=%r14,<r3Sigma0=%rax
xor %r14,%rax
# qhasm: r339 = r3>>>39
# asm 1: rorx $39,<r3=int64#6,>r339=int64#12
# asm 2: rorx $39,<r3=%r9,>r339=%r14
rorx $39,%r9,%r14
# qhasm: r6 += r2
# asm 1: add <r2=int64#3,<r6=int64#10
# asm 2: add <r2=%rdx,<r6=%r12
add %rdx,%r12
# qhasm: 1x,0 W2sigma1 ^= W2left3
# asm 1: vpxor <W2left3=reg256#7%128,<W2sigma1=reg256#9%128,<W2sigma1=reg256#9%128
# asm 2: vpxor <W2left3=%xmm6,<W2sigma1=%xmm8,<W2sigma1=%xmm8
vpxor %xmm6,%xmm8,%xmm8
# qhasm: 2x,0 W2right6 = W2 unsigned>> 6
# asm 1: vpsrlq $6,<W2=reg256#6%128,>W2right6=reg256#6%128
# asm 2: vpsrlq $6,<W2=%xmm5,>W2right6=%xmm5
vpsrlq $6,%xmm5,%xmm5
# qhasm: r3Sigma0 ^= r339
# asm 1: xor <r339=int64#12,<r3Sigma0=int64#7
# asm 2: xor <r339=%r14,<r3Sigma0=%rax
xor %r14,%rax
# qhasm: r2 += maj2
# asm 1: add <maj2=int64#14,<r2=int64#3
# asm 2: add <maj2=%rbx,<r2=%rdx
add %rbx,%rdx
# qhasm: ch1 = r0
# asm 1: mov <r0=int64#2,>ch1=int64#12
# asm 2: mov <r0=%rsi,>ch1=%r14
mov %rsi,%r14
# qhasm: ch1 ^= r7
# asm 1: xor <r7=int64#11,<ch1=int64#12
# asm 2: xor <r7=%r13,<ch1=%r14
xor %r13,%r14
# qhasm: r2 += r3Sigma0
# asm 1: add <r3Sigma0=int64#7,<r2=int64#3
# asm 2: add <r3Sigma0=%rax,<r2=%rdx
add %rax,%rdx
# qhasm: r6Sigma1 = r6>>>14
# asm 1: rorx $14,<r6=int64#10,>r6Sigma1=int64#7
# asm 2: rorx $14,<r6=%r12,>r6Sigma1=%rax
rorx $14,%r12,%rax
# qhasm: 1x,0 W2sigma1 ^= W2right6
# asm 1: vpxor <W2right6=reg256#6%128,<W2sigma1=reg256#9%128,<W2sigma1=reg256#9%128
# asm 2: vpxor <W2right6=%xmm5,<W2sigma1=%xmm8,<W2sigma1=%xmm8
vpxor %xmm5,%xmm8,%xmm8
# qhasm: ch1 &= r6
# asm 1: and <r6=int64#10,<ch1=int64#12
# asm 2: and <r6=%r12,<ch1=%r14
and %r12,%r14
# qhasm: r618 = r6>>>18
# asm 1: rorx $18,<r6=int64#10,>r618=int64#13
# asm 2: rorx $18,<r6=%r12,>r618=%r15
rorx $18,%r12,%r15
# qhasm: r641 = r6>>>41
# asm 1: rorx $41,<r6=int64#10,>r641=int64#14
# asm 2: rorx $41,<r6=%r12,>r641=%rbx
rorx $41,%r12,%rbx
# qhasm: 4x X4 = W2sigma1 + X4
# asm 1: vpaddq <W2sigma1=reg256#9,<X4=reg256#3,>X4=reg256#3
# asm 2: vpaddq <W2sigma1=%ymm8,<X4=%ymm2,>X4=%ymm2
vpaddq %ymm8,%ymm2,%ymm2
# qhasm: ch1 ^= r0
# asm 1: xor <r0=int64#2,<ch1=int64#12
# asm 2: xor <r0=%rsi,<ch1=%r14
xor %rsi,%r14
# qhasm: r6Sigma1 ^= r618
# asm 1: xor <r618=int64#13,<r6Sigma1=int64#7
# asm 2: xor <r618=%r15,<r6Sigma1=%rax
xor %r15,%rax
# qhasm: 2x,0 W4right19 = X4 unsigned>> 19
# asm 1: vpsrlq $19,<X4=reg256#3%128,>W4right19=reg256#6%128
# asm 2: vpsrlq $19,<X4=%xmm2,>W4right19=%xmm5
vpsrlq $19,%xmm2,%xmm5
# qhasm: r1 += ch1
# asm 1: add <ch1=int64#12,<r1=int64#4
# asm 2: add <ch1=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: r2Sigma0 = r2>>>28
# asm 1: rorx $28,<r2=int64#3,>r2Sigma0=int64#12
# asm 2: rorx $28,<r2=%rdx,>r2Sigma0=%r14
rorx $28,%rdx,%r14
# qhasm: 2x,0 W4left45 = X4 << 45
# asm 1: vpsllq $45,<X4=reg256#3%128,>W4left45=reg256#7%128
# asm 2: vpsllq $45,<X4=%xmm2,>W4left45=%xmm6
vpsllq $45,%xmm2,%xmm6
# qhasm: r6Sigma1 ^= r641
# asm 1: xor <r641=int64#14,<r6Sigma1=int64#7
# asm 2: xor <r641=%rbx,<r6Sigma1=%rax
xor %rbx,%rax
# qhasm: r234 = r2>>>34
# asm 1: rorx $34,<r2=int64#3,>r234=int64#13
# asm 2: rorx $34,<r2=%rdx,>r234=%r15
rorx $34,%rdx,%r15
# qhasm: maj0 = r3
# asm 1: mov <r3=int64#6,>maj0=int64#14
# asm 2: mov <r3=%r9,>maj0=%rbx
mov %r9,%rbx
# qhasm: maj0 ^= r2
# asm 1: xor <r2=int64#3,<maj0=int64#14
# asm 2: xor <r2=%rdx,<maj0=%rbx
xor %rdx,%rbx
# qhasm: 2x,0 W4right61 = X4 unsigned>> 61
# asm 1: vpsrlq $61,<X4=reg256#3%128,>W4right61=reg256#8%128
# asm 2: vpsrlq $61,<X4=%xmm2,>W4right61=%xmm7
vpsrlq $61,%xmm2,%xmm7
# qhasm: X9 = mem256[&w + 72]
# asm 1: vmovupd <w=stack1280#1,>X9=reg256#9
# asm 2: vmovupd <w=232(%rsp),>X9=%ymm8
vmovupd 232(%rsp),%ymm8
# qhasm: r2Sigma0 ^= r234
# asm 1: xor <r234=int64#13,<r2Sigma0=int64#12
# asm 2: xor <r234=%r15,<r2Sigma0=%r14
xor %r15,%r14
# qhasm: 1x,0 W4sigma1 = W4right19 ^ W4left45
# asm 1: vpxor <W4right19=reg256#6%128,<W4left45=reg256#7%128,>W4sigma1=reg256#6%128
# asm 2: vpxor <W4right19=%xmm5,<W4left45=%xmm6,>W4sigma1=%xmm5
vpxor %xmm5,%xmm6,%xmm5
# qhasm: r239 = r2>>>39
# asm 1: rorx $39,<r2=int64#3,>r239=int64#13
# asm 2: rorx $39,<r2=%rdx,>r239=%r15
rorx $39,%rdx,%r15
# qhasm: 2x,0 W4left3 = X4 << 3
# asm 1: vpsllq $3,<X4=reg256#3%128,>W4left3=reg256#7%128
# asm 2: vpsllq $3,<X4=%xmm2,>W4left3=%xmm6
vpsllq $3,%xmm2,%xmm6
# qhasm: 1x,0 W4sigma1 ^= W4right61
# asm 1: vpxor <W4right61=reg256#8%128,<W4sigma1=reg256#6%128,<W4sigma1=reg256#6%128
# asm 2: vpxor <W4right61=%xmm7,<W4sigma1=%xmm5,<W4sigma1=%xmm5
vpxor %xmm7,%xmm5,%xmm5
# qhasm: 2x,0 W4right6 = X4 unsigned>> 6
# asm 1: vpsrlq $6,<X4=reg256#3%128,>W4right6=reg256#8%128
# asm 2: vpsrlq $6,<X4=%xmm2,>W4right6=%xmm7
vpsrlq $6,%xmm2,%xmm7
# qhasm: r2Sigma0 ^= r239
# asm 1: xor <r239=int64#13,<r2Sigma0=int64#12
# asm 2: xor <r239=%r15,<r2Sigma0=%r14
xor %r15,%r14
# qhasm: r1 += r6Sigma1
# asm 1: add <r6Sigma1=int64#7,<r1=int64#4
# asm 2: add <r6Sigma1=%rax,<r1=%rcx
add %rax,%rcx
# qhasm: 1x,0 W4sigma1 ^= W4left3
# asm 1: vpxor <W4left3=reg256#7%128,<W4sigma1=reg256#6%128,<W4sigma1=reg256#6%128
# asm 2: vpxor <W4left3=%xmm6,<W4sigma1=%xmm5,<W4sigma1=%xmm5
vpxor %xmm6,%xmm5,%xmm5
# qhasm: r5 += r1
# asm 1: add <r1=int64#4,<r5=int64#8
# asm 2: add <r1=%rcx,<r5=%r10
add %rcx,%r10
# qhasm: r2andr3 = r3
# asm 1: mov <r3=int64#6,>r2andr3=int64#7
# asm 2: mov <r3=%r9,>r2andr3=%rax
mov %r9,%rax
# qhasm: r5Sigma1 = r5>>>14
# asm 1: rorx $14,<r5=int64#8,>r5Sigma1=int64#13
# asm 2: rorx $14,<r5=%r10,>r5Sigma1=%r15
rorx $14,%r10,%r15
# qhasm: r2andr3 &= r2
# asm 1: and <r2=int64#3,<r2andr3=int64#7
# asm 2: and <r2=%rdx,<r2andr3=%rax
and %rdx,%rax
# qhasm: 1x,0 W4sigma1 ^= W4right6
# asm 1: vpxor <W4right6=reg256#8%128,<W4sigma1=reg256#6%128,<W4sigma1=reg256#6%128
# asm 2: vpxor <W4right6=%xmm7,<W4sigma1=%xmm5,<W4sigma1=%xmm5
vpxor %xmm7,%xmm5,%xmm5
# qhasm: r1 += r2Sigma0
# asm 1: add <r2Sigma0=int64#12,<r1=int64#4
# asm 2: add <r2Sigma0=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: maj1 = r4
# asm 1: mov <r4=int64#9,>maj1=int64#12
# asm 2: mov <r4=%r11,>maj1=%r14
mov %r11,%r14
# qhasm: maj1 &= maj0
# asm 1: and <maj0=int64#14,<maj1=int64#12
# asm 2: and <maj0=%rbx,<maj1=%r14
and %rbx,%r14
# qhasm: W4sigma1 = W4sigma1[2,3,0,1]
# asm 1: vpermq $0x4e,<W4sigma1=reg256#6,>W4sigma1=reg256#6
# asm 2: vpermq $0x4e,<W4sigma1=%ymm5,>W4sigma1=%ymm5
vpermq $0x4e,%ymm5,%ymm5
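# note: sigma1(x) = (x>>>19) ^ (x>>>61) ^ (x>>6) is built the same way from the
# note: $19/$45 and $61/$3 shift pairs plus the $6 shift. It is evaluated two lanes
# note: at a time on xmm registers because W[t+2],W[t+3] need sigma1 of the
# note: just-computed W[t],W[t+1]; the 128-bit ops leave the upper ymm lane zero,
# note: so the vpermq $0x4e above moves the two results into the upper lane and the
# note: following 4x add touches only the upper two schedule words.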
# qhasm: maj1 ^= r2andr3
# asm 1: xor <r2andr3=int64#7,<maj1=int64#12
# asm 2: xor <r2andr3=%rax,<maj1=%r14
xor %rax,%r14
# qhasm: ch0 = r7
# asm 1: mov <r7=int64#11,>ch0=int64#15
# asm 2: mov <r7=%r13,>ch0=%rbp
mov %r13,%rbp
# qhasm: 4x X9right1 = X9 unsigned>> 1
# asm 1: vpsrlq $1,<X9=reg256#9,>X9right1=reg256#7
# asm 2: vpsrlq $1,<X9=%ymm8,>X9right1=%ymm6
vpsrlq $1,%ymm8,%ymm6
# qhasm: ch0 ^= r6
# asm 1: xor <r6=int64#10,<ch0=int64#15
# asm 2: xor <r6=%r12,<ch0=%rbp
xor %r12,%rbp
# qhasm: r1 += maj1
# asm 1: add <maj1=int64#12,<r1=int64#4
# asm 2: add <maj1=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: ch0 &= r5
# asm 1: and <r5=int64#8,<ch0=int64#15
# asm 2: and <r5=%r10,<ch0=%rbp
and %r10,%rbp
# qhasm: r518 = r5>>>18
# asm 1: rorx $18,<r5=int64#8,>r518=int64#12
# asm 2: rorx $18,<r5=%r10,>r518=%r14
rorx $18,%r10,%r14
# qhasm: maj0 &= r1
# asm 1: and <r1=int64#4,<maj0=int64#14
# asm 2: and <r1=%rcx,<maj0=%rbx
and %rcx,%rbx
# qhasm: r0 += wc12131415[3]
# asm 1: addq <wc12131415=stack256#5,<r0=int64#2
# asm 2: addq <wc12131415=152(%rsp),<r0=%rsi
addq 152(%rsp),%rsi
# qhasm: ch0 ^= r7
# asm 1: xor <r7=int64#11,<ch0=int64#15
# asm 2: xor <r7=%r13,<ch0=%rbp
xor %r13,%rbp
# qhasm: r5Sigma1 ^= r518
# asm 1: xor <r518=int64#12,<r5Sigma1=int64#13
# asm 2: xor <r518=%r14,<r5Sigma1=%r15
xor %r14,%r15
# qhasm: 4x X4 = X4 + W4sigma1
# asm 1: vpaddq <X4=reg256#3,<W4sigma1=reg256#6,>X4=reg256#3
# asm 2: vpaddq <X4=%ymm2,<W4sigma1=%ymm5,>X4=%ymm2
vpaddq %ymm2,%ymm5,%ymm2
# qhasm: r541 = r5>>>41
# asm 1: rorx $41,<r5=int64#8,>r541=int64#12
# asm 2: rorx $41,<r5=%r10,>r541=%r14
rorx $41,%r10,%r14
# qhasm: mem256[&w + 32] = X4
# asm 1: vmovupd <X4=reg256#3,<w=stack1280#1
# asm 2: vmovupd <X4=%ymm2,<w=192(%rsp)
vmovupd %ymm2,192(%rsp)
# qhasm: r0 += ch0
# asm 1: add <ch0=int64#15,<r0=int64#2
# asm 2: add <ch0=%rbp,<r0=%rsi
add %rbp,%rsi
# qhasm: maj0 ^= r2andr3
# asm 1: xor <r2andr3=int64#7,<maj0=int64#14
# asm 2: xor <r2andr3=%rax,<maj0=%rbx
xor %rax,%rbx
# qhasm: r1Sigma0 = r1>>>28
# asm 1: rorx $28,<r1=int64#4,>r1Sigma0=int64#7
# asm 2: rorx $28,<r1=%rcx,>r1Sigma0=%rax
rorx $28,%rcx,%rax
# qhasm: 4x D4 = X4 + mem256[constants + 32]
# asm 1: vpaddq 32(<constants=int64#5),<X4=reg256#3,>D4=reg256#6
# asm 2: vpaddq 32(<constants=%r8),<X4=%ymm2,>D4=%ymm5
vpaddq 32(%r8),%ymm2,%ymm5
# qhasm: wc4567 = D4
# asm 1: vmovapd <D4=reg256#6,>wc4567=stack256#4
# asm 2: vmovapd <D4=%ymm5,>wc4567=96(%rsp)
vmovapd %ymm5,96(%rsp)
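# note: each completed 4-word schedule block is written back into the w buffer and,
# note: with the next four round constants added from the constants table, parked in
# note: a wc* stack slot; the scalar rounds further on pick these sums up with plain
# note: addq, so the vector schedule expansion runs ahead of the scalar compression.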
# qhasm: r5Sigma1 ^= r541
# asm 1: xor <r541=int64#12,<r5Sigma1=int64#13
# asm 2: xor <r541=%r14,<r5Sigma1=%r15
xor %r14,%r15
# qhasm: r134 = r1>>>34
# asm 1: rorx $34,<r1=int64#4,>r134=int64#12
# asm 2: rorx $34,<r1=%rcx,>r134=%r14
rorx $34,%rcx,%r14
# qhasm: r0 += r5Sigma1
# asm 1: add <r5Sigma1=int64#13,<r0=int64#2
# asm 2: add <r5Sigma1=%r15,<r0=%rsi
add %r15,%rsi
# qhasm: r1Sigma0 ^= r134
# asm 1: xor <r134=int64#12,<r1Sigma0=int64#7
# asm 2: xor <r134=%r14,<r1Sigma0=%rax
xor %r14,%rax
# qhasm: r7 += wc0123[0]
# asm 1: addq <wc0123=stack256#3,<r7=int64#11
# asm 2: addq <wc0123=64(%rsp),<r7=%r13
addq 64(%rsp),%r13
# qhasm: r139 = r1>>>39
# asm 1: rorx $39,<r1=int64#4,>r139=int64#12
# asm 2: rorx $39,<r1=%rcx,>r139=%r14
rorx $39,%rcx,%r14
# qhasm: r4 += r0
# asm 1: add <r0=int64#2,<r4=int64#9
# asm 2: add <r0=%rsi,<r4=%r11
add %rsi,%r11
# qhasm: r0 += maj0
# asm 1: add <maj0=int64#14,<r0=int64#2
# asm 2: add <maj0=%rbx,<r0=%rsi
add %rbx,%rsi
# qhasm: r1Sigma0 ^= r139
# asm 1: xor <r139=int64#12,<r1Sigma0=int64#7
# asm 2: xor <r139=%r14,<r1Sigma0=%rax
xor %r14,%rax
# qhasm: r4Sigma1 = r4>>>14
# asm 1: rorx $14,<r4=int64#9,>r4Sigma1=int64#12
# asm 2: rorx $14,<r4=%r11,>r4Sigma1=%r14
rorx $14,%r11,%r14
# qhasm: W6 = mem128[&w + 48],0
# asm 1: vmovupd <w=stack1280#1,>W6=reg256#6%128
# asm 2: vmovupd <w=208(%rsp),>W6=%xmm5
vmovupd 208(%rsp),%xmm5
# qhasm: r0 += r1Sigma0
# asm 1: add <r1Sigma0=int64#7,<r0=int64#2
# asm 2: add <r1Sigma0=%rax,<r0=%rsi
add %rax,%rsi
# qhasm: ch7 = r6
# asm 1: mov <r6=int64#10,>ch7=int64#7
# asm 2: mov <r6=%r12,>ch7=%rax
mov %r12,%rax
# qhasm: ch7 ^= r5
# asm 1: xor <r5=int64#8,<ch7=int64#7
# asm 2: xor <r5=%r10,<ch7=%rax
xor %r10,%rax
# qhasm: r418 = r4>>>18
# asm 1: rorx $18,<r4=int64#9,>r418=int64#13
# asm 2: rorx $18,<r4=%r11,>r418=%r15
rorx $18,%r11,%r15
# qhasm: 4x X9left63 = X9 << 63
# asm 1: vpsllq $63,<X9=reg256#9,>X9left63=reg256#8
# asm 2: vpsllq $63,<X9=%ymm8,>X9left63=%ymm7
vpsllq $63,%ymm8,%ymm7
# qhasm: ch7 &= r4
# asm 1: and <r4=int64#9,<ch7=int64#7
# asm 2: and <r4=%r11,<ch7=%rax
and %r11,%rax
# qhasm: maj6 = r1
# asm 1: mov <r1=int64#4,>maj6=int64#14
# asm 2: mov <r1=%rcx,>maj6=%rbx
mov %rcx,%rbx
# qhasm: maj6 ^= r0
# asm 1: xor <r0=int64#2,<maj6=int64#14
# asm 2: xor <r0=%rsi,<maj6=%rbx
xor %rsi,%rbx
# qhasm: r441 = r4>>>41
# asm 1: rorx $41,<r4=int64#9,>r441=int64#15
# asm 2: rorx $41,<r4=%r11,>r441=%rbp
rorx $41,%r11,%rbp
# qhasm: X9sigma0 = X9right1 ^ X9left63
# asm 1: vpxor <X9right1=reg256#7,<X9left63=reg256#8,>X9sigma0=reg256#7
# asm 2: vpxor <X9right1=%ymm6,<X9left63=%ymm7,>X9sigma0=%ymm6
vpxor %ymm6,%ymm7,%ymm6
# qhasm: r4Sigma1 ^= r418
# asm 1: xor <r418=int64#13,<r4Sigma1=int64#12
# asm 2: xor <r418=%r15,<r4Sigma1=%r14
xor %r15,%r14
# qhasm: 4x X9right8 = X9 unsigned>> 8
# asm 1: vpsrlq $8,<X9=reg256#9,>X9right8=reg256#8
# asm 2: vpsrlq $8,<X9=%ymm8,>X9right8=%ymm7
vpsrlq $8,%ymm8,%ymm7
# qhasm: ch7 ^= r6
# asm 1: xor <r6=int64#10,<ch7=int64#7
# asm 2: xor <r6=%r12,<ch7=%rax
xor %r12,%rax
# qhasm: r4Sigma1 ^= r441
# asm 1: xor <r441=int64#15,<r4Sigma1=int64#12
# asm 2: xor <r441=%rbp,<r4Sigma1=%r14
xor %rbp,%r14
# qhasm: r0Sigma0 = r0>>>28
# asm 1: rorx $28,<r0=int64#2,>r0Sigma0=int64#13
# asm 2: rorx $28,<r0=%rsi,>r0Sigma0=%r15
rorx $28,%rsi,%r15
# qhasm: r7 += ch7
# asm 1: add <ch7=int64#7,<r7=int64#11
# asm 2: add <ch7=%rax,<r7=%r13
add %rax,%r13
# qhasm: X9sigma0 = X9sigma0 ^ X9right8
# asm 1: vpxor <X9sigma0=reg256#7,<X9right8=reg256#8,>X9sigma0=reg256#7
# asm 2: vpxor <X9sigma0=%ymm6,<X9right8=%ymm7,>X9sigma0=%ymm6
vpxor %ymm6,%ymm7,%ymm6
# qhasm: r7 += r4Sigma1
# asm 1: add <r4Sigma1=int64#12,<r7=int64#11
# asm 2: add <r4Sigma1=%r14,<r7=%r13
add %r14,%r13
# qhasm: r034 = r0>>>34
# asm 1: rorx $34,<r0=int64#2,>r034=int64#7
# asm 2: rorx $34,<r0=%rsi,>r034=%rax
rorx $34,%rsi,%rax
# qhasm: maj7 = r2
# asm 1: mov <r2=int64#3,>maj7=int64#12
# asm 2: mov <r2=%rdx,>maj7=%r14
mov %rdx,%r14
# qhasm: maj7 &= maj6
# asm 1: and <maj6=int64#14,<maj7=int64#12
# asm 2: and <maj6=%rbx,<maj7=%r14
and %rbx,%r14
# qhasm: r0Sigma0 ^= r034
# asm 1: xor <r034=int64#7,<r0Sigma0=int64#13
# asm 2: xor <r034=%rax,<r0Sigma0=%r15
xor %rax,%r15
# qhasm: 2x,0 W6right19 = W6 unsigned>> 19
# asm 1: vpsrlq $19,<W6=reg256#6%128,>W6right19=reg256#8%128
# asm 2: vpsrlq $19,<W6=%xmm5,>W6right19=%xmm7
vpsrlq $19,%xmm5,%xmm7
# qhasm: r039 = r0>>>39
# asm 1: rorx $39,<r0=int64#2,>r039=int64#7
# asm 2: rorx $39,<r0=%rsi,>r039=%rax
rorx $39,%rsi,%rax
# qhasm: r3 += r7
# asm 1: add <r7=int64#11,<r3=int64#6
# asm 2: add <r7=%r13,<r3=%r9
add %r13,%r9
# qhasm: r0Sigma0 ^= r039
# asm 1: xor <r039=int64#7,<r0Sigma0=int64#13
# asm 2: xor <r039=%rax,<r0Sigma0=%r15
xor %rax,%r15
# qhasm: 4x X9left56 = X9 << 56
# asm 1: vpsllq $56,<X9=reg256#9,>X9left56=reg256#10
# asm 2: vpsllq $56,<X9=%ymm8,>X9left56=%ymm9
vpsllq $56,%ymm8,%ymm9
# qhasm: r6 += wc0123[1]
# asm 1: addq <wc0123=stack256#3,<r6=int64#10
# asm 2: addq <wc0123=72(%rsp),<r6=%r12
addq 72(%rsp),%r12
# qhasm: r0andr1 = r1
# asm 1: mov <r1=int64#4,>r0andr1=int64#7
# asm 2: mov <r1=%rcx,>r0andr1=%rax
mov %rcx,%rax
# qhasm: r0andr1 &= r0
# asm 1: and <r0=int64#2,<r0andr1=int64#7
# asm 2: and <r0=%rsi,<r0andr1=%rax
and %rsi,%rax
# qhasm: r7 += r0Sigma0
# asm 1: add <r0Sigma0=int64#13,<r7=int64#11
# asm 2: add <r0Sigma0=%r15,<r7=%r13
add %r15,%r13
# qhasm: ch6 = r5
# asm 1: mov <r5=int64#8,>ch6=int64#13
# asm 2: mov <r5=%r10,>ch6=%r15
mov %r10,%r15
# qhasm: 2x,0 W6left45 = W6 << 45
# asm 1: vpsllq $45,<W6=reg256#6%128,>W6left45=reg256#11%128
# asm 2: vpsllq $45,<W6=%xmm5,>W6left45=%xmm10
vpsllq $45,%xmm5,%xmm10
# qhasm: maj7 ^= r0andr1
# asm 1: xor <r0andr1=int64#7,<maj7=int64#12
# asm 2: xor <r0andr1=%rax,<maj7=%r14
xor %rax,%r14
# qhasm: ch6 ^= r4
# asm 1: xor <r4=int64#9,<ch6=int64#13
# asm 2: xor <r4=%r11,<ch6=%r15
xor %r11,%r15
# qhasm: 2x,0 W6right61 = W6 unsigned>> 61
# asm 1: vpsrlq $61,<W6=reg256#6%128,>W6right61=reg256#12%128
# asm 2: vpsrlq $61,<W6=%xmm5,>W6right61=%xmm11
vpsrlq $61,%xmm5,%xmm11
# qhasm: r3Sigma1 = r3>>>14
# asm 1: rorx $14,<r3=int64#6,>r3Sigma1=int64#15
# asm 2: rorx $14,<r3=%r9,>r3Sigma1=%rbp
rorx $14,%r9,%rbp
# qhasm: r7 += maj7
# asm 1: add <maj7=int64#12,<r7=int64#11
# asm 2: add <maj7=%r14,<r7=%r13
add %r14,%r13
# qhasm: X9sigma0 = X9sigma0 ^ X9left56
# asm 1: vpxor <X9sigma0=reg256#7,<X9left56=reg256#10,>X9sigma0=reg256#7
# asm 2: vpxor <X9sigma0=%ymm6,<X9left56=%ymm9,>X9sigma0=%ymm6
vpxor %ymm6,%ymm9,%ymm6
# qhasm: 4x X9right7 = X9 unsigned>> 7
# asm 1: vpsrlq $7,<X9=reg256#9,>X9right7=reg256#9
# asm 2: vpsrlq $7,<X9=%ymm8,>X9right7=%ymm8
vpsrlq $7,%ymm8,%ymm8
# qhasm: r318 = r3>>>18
# asm 1: rorx $18,<r3=int64#6,>r318=int64#12
# asm 2: rorx $18,<r3=%r9,>r318=%r14
rorx $18,%r9,%r14
# qhasm: 1x,0 W6sigma1 = W6right19 ^ W6left45
# asm 1: vpxor <W6right19=reg256#8%128,<W6left45=reg256#11%128,>W6sigma1=reg256#8%128
# asm 2: vpxor <W6right19=%xmm7,<W6left45=%xmm10,>W6sigma1=%xmm7
vpxor %xmm7,%xmm10,%xmm7
# qhasm: ch6 &= r3
# asm 1: and <r3=int64#6,<ch6=int64#13
# asm 2: and <r3=%r9,<ch6=%r15
and %r9,%r15
# qhasm: r3Sigma1 ^= r318
# asm 1: xor <r318=int64#12,<r3Sigma1=int64#15
# asm 2: xor <r318=%r14,<r3Sigma1=%rbp
xor %r14,%rbp
# qhasm: r341 = r3>>>41
# asm 1: rorx $41,<r3=int64#6,>r341=int64#12
# asm 2: rorx $41,<r3=%r9,>r341=%r14
rorx $41,%r9,%r14
# qhasm: X9sigma0 = X9sigma0 ^ X9right7
# asm 1: vpxor <X9sigma0=reg256#7,<X9right7=reg256#9,>X9sigma0=reg256#7
# asm 2: vpxor <X9sigma0=%ymm6,<X9right7=%ymm8,>X9sigma0=%ymm6
vpxor %ymm6,%ymm8,%ymm6
# qhasm: maj6 &= r7
# asm 1: and <r7=int64#11,<maj6=int64#14
# asm 2: and <r7=%r13,<maj6=%rbx
and %r13,%rbx
# qhasm: 1x,0 W6sigma1 ^= W6right61
# asm 1: vpxor <W6right61=reg256#12%128,<W6sigma1=reg256#8%128,<W6sigma1=reg256#8%128
# asm 2: vpxor <W6right61=%xmm11,<W6sigma1=%xmm7,<W6sigma1=%xmm7
vpxor %xmm11,%xmm7,%xmm7
# qhasm: 4x X8 = X8 + X9sigma0
# asm 1: vpaddq <X8=reg256#4,<X9sigma0=reg256#7,>X8=reg256#4
# asm 2: vpaddq <X8=%ymm3,<X9sigma0=%ymm6,>X8=%ymm3
vpaddq %ymm3,%ymm6,%ymm3
# qhasm: r3Sigma1 ^= r341
# asm 1: xor <r341=int64#12,<r3Sigma1=int64#15
# asm 2: xor <r341=%r14,<r3Sigma1=%rbp
xor %r14,%rbp
# qhasm: maj6 ^= r0andr1
# asm 1: xor <r0andr1=int64#7,<maj6=int64#14
# asm 2: xor <r0andr1=%rax,<maj6=%rbx
xor %rax,%rbx
# qhasm: 2x,0 W6left3 = W6 << 3
# asm 1: vpsllq $3,<W6=reg256#6%128,>W6left3=reg256#7%128
# asm 2: vpsllq $3,<W6=%xmm5,>W6left3=%xmm6
vpsllq $3,%xmm5,%xmm6
# qhasm: r7Sigma0 = r7>>>28
# asm 1: rorx $28,<r7=int64#11,>r7Sigma0=int64#7
# asm 2: rorx $28,<r7=%r13,>r7Sigma0=%rax
rorx $28,%r13,%rax
# qhasm: ch6 ^= r5
# asm 1: xor <r5=int64#8,<ch6=int64#13
# asm 2: xor <r5=%r10,<ch6=%r15
xor %r10,%r15
# qhasm: r6 += r3Sigma1
# asm 1: add <r3Sigma1=int64#15,<r6=int64#10
# asm 2: add <r3Sigma1=%rbp,<r6=%r12
add %rbp,%r12
# qhasm: 4x X8 = X8 + mem256[&w + 8]
# asm 1: vpaddq <w=stack1280#1,<X8=reg256#4,>X8=reg256#4
# asm 2: vpaddq <w=168(%rsp),<X8=%ymm3,>X8=%ymm3
vpaddq 168(%rsp),%ymm3,%ymm3
# qhasm: r734 = r7>>>34
# asm 1: rorx $34,<r7=int64#11,>r734=int64#12
# asm 2: rorx $34,<r7=%r13,>r734=%r14
rorx $34,%r13,%r14
# qhasm: r5 += wc0123[2]
# asm 1: addq <wc0123=stack256#3,<r5=int64#8
# asm 2: addq <wc0123=80(%rsp),<r5=%r10
addq 80(%rsp),%r10
# qhasm: r6 += ch6
# asm 1: add <ch6=int64#13,<r6=int64#10
# asm 2: add <ch6=%r15,<r6=%r12
add %r15,%r12
# qhasm: r7Sigma0 ^= r734
# asm 1: xor <r734=int64#12,<r7Sigma0=int64#7
# asm 2: xor <r734=%r14,<r7Sigma0=%rax
xor %r14,%rax
# qhasm: 1x,0 W6sigma1 ^= W6left3
# asm 1: vpxor <W6left3=reg256#7%128,<W6sigma1=reg256#8%128,<W6sigma1=reg256#8%128
# asm 2: vpxor <W6left3=%xmm6,<W6sigma1=%xmm7,<W6sigma1=%xmm7
vpxor %xmm6,%xmm7,%xmm7
# qhasm: r739 = r7>>>39
# asm 1: rorx $39,<r7=int64#11,>r739=int64#12
# asm 2: rorx $39,<r7=%r13,>r739=%r14
rorx $39,%r13,%r14
# qhasm: r2 += r6
# asm 1: add <r6=int64#10,<r2=int64#3
# asm 2: add <r6=%r12,<r2=%rdx
add %r12,%rdx
# qhasm: r7Sigma0 ^= r739
# asm 1: xor <r739=int64#12,<r7Sigma0=int64#7
# asm 2: xor <r739=%r14,<r7Sigma0=%rax
xor %r14,%rax
# qhasm: ch5 = r4
# asm 1: mov <r4=int64#9,>ch5=int64#12
# asm 2: mov <r4=%r11,>ch5=%r14
mov %r11,%r14
# qhasm: 2x,0 W6right6 = W6 unsigned>> 6
# asm 1: vpsrlq $6,<W6=reg256#6%128,>W6right6=reg256#6%128
# asm 2: vpsrlq $6,<W6=%xmm5,>W6right6=%xmm5
vpsrlq $6,%xmm5,%xmm5
# qhasm: r6 += maj6
# asm 1: add <maj6=int64#14,<r6=int64#10
# asm 2: add <maj6=%rbx,<r6=%r12
add %rbx,%r12
# qhasm: ch5 ^= r3
# asm 1: xor <r3=int64#6,<ch5=int64#12
# asm 2: xor <r3=%r9,<ch5=%r14
xor %r9,%r14
# qhasm: r6 += r7Sigma0
# asm 1: add <r7Sigma0=int64#7,<r6=int64#10
# asm 2: add <r7Sigma0=%rax,<r6=%r12
add %rax,%r12
# qhasm: r2Sigma1 = r2>>>14
# asm 1: rorx $14,<r2=int64#3,>r2Sigma1=int64#7
# asm 2: rorx $14,<r2=%rdx,>r2Sigma1=%rax
rorx $14,%rdx,%rax
# qhasm: 1x,0 W6sigma1 ^= W6right6
# asm 1: vpxor <W6right6=reg256#6%128,<W6sigma1=reg256#8%128,<W6sigma1=reg256#8%128
# asm 2: vpxor <W6right6=%xmm5,<W6sigma1=%xmm7,<W6sigma1=%xmm7
vpxor %xmm5,%xmm7,%xmm7
# qhasm: ch5 &= r2
# asm 1: and <r2=int64#3,<ch5=int64#12
# asm 2: and <r2=%rdx,<ch5=%r14
and %rdx,%r14
# qhasm: r218 = r2>>>18
# asm 1: rorx $18,<r2=int64#3,>r218=int64#13
# asm 2: rorx $18,<r2=%rdx,>r218=%r15
rorx $18,%rdx,%r15
# qhasm: r241 = r2>>>41
# asm 1: rorx $41,<r2=int64#3,>r241=int64#14
# asm 2: rorx $41,<r2=%rdx,>r241=%rbx
rorx $41,%rdx,%rbx
# qhasm: 4x X8 = W6sigma1 + X8
# asm 1: vpaddq <W6sigma1=reg256#8,<X8=reg256#4,>X8=reg256#4
# asm 2: vpaddq <W6sigma1=%ymm7,<X8=%ymm3,>X8=%ymm3
vpaddq %ymm7,%ymm3,%ymm3
# qhasm: 2x,0 W8right19 = X8 unsigned>> 19
# asm 1: vpsrlq $19,<X8=reg256#4%128,>W8right19=reg256#6%128
# asm 2: vpsrlq $19,<X8=%xmm3,>W8right19=%xmm5
vpsrlq $19,%xmm3,%xmm5
# qhasm: ch5 ^= r4
# asm 1: xor <r4=int64#9,<ch5=int64#12
# asm 2: xor <r4=%r11,<ch5=%r14
xor %r11,%r14
# qhasm: r2Sigma1 ^= r218
# asm 1: xor <r218=int64#13,<r2Sigma1=int64#7
# asm 2: xor <r218=%r15,<r2Sigma1=%rax
xor %r15,%rax
# qhasm: r6Sigma0 = r6>>>28
# asm 1: rorx $28,<r6=int64#10,>r6Sigma0=int64#13
# asm 2: rorx $28,<r6=%r12,>r6Sigma0=%r15
rorx $28,%r12,%r15
# qhasm: r5 += ch5
# asm 1: add <ch5=int64#12,<r5=int64#8
# asm 2: add <ch5=%r14,<r5=%r10
add %r14,%r10
# qhasm: 2x,0 W8left45 = X8 << 45
# asm 1: vpsllq $45,<X8=reg256#4%128,>W8left45=reg256#7%128
# asm 2: vpsllq $45,<X8=%xmm3,>W8left45=%xmm6
vpsllq $45,%xmm3,%xmm6
# qhasm: r634 = r6>>>34
# asm 1: rorx $34,<r6=int64#10,>r634=int64#12
# asm 2: rorx $34,<r6=%r12,>r634=%r14
rorx $34,%r12,%r14
# qhasm: r2Sigma1 ^= r241
# asm 1: xor <r241=int64#14,<r2Sigma1=int64#7
# asm 2: xor <r241=%rbx,<r2Sigma1=%rax
xor %rbx,%rax
# qhasm: maj4 = r7
# asm 1: mov <r7=int64#11,>maj4=int64#14
# asm 2: mov <r7=%r13,>maj4=%rbx
mov %r13,%rbx
# qhasm: 2x,0 W8right61 = X8 unsigned>> 61
# asm 1: vpsrlq $61,<X8=reg256#4%128,>W8right61=reg256#8%128
# asm 2: vpsrlq $61,<X8=%xmm3,>W8right61=%xmm7
vpsrlq $61,%xmm3,%xmm7
# qhasm: maj4 ^= r6
# asm 1: xor <r6=int64#10,<maj4=int64#14
# asm 2: xor <r6=%r12,<maj4=%rbx
xor %r12,%rbx
# qhasm: r6Sigma0 ^= r634
# asm 1: xor <r634=int64#12,<r6Sigma0=int64#13
# asm 2: xor <r634=%r14,<r6Sigma0=%r15
xor %r14,%r15
# qhasm: 1x,0 W8sigma1 = W8right19 ^ W8left45
# asm 1: vpxor <W8right19=reg256#6%128,<W8left45=reg256#7%128,>W8sigma1=reg256#6%128
# asm 2: vpxor <W8right19=%xmm5,<W8left45=%xmm6,>W8sigma1=%xmm5
vpxor %xmm5,%xmm6,%xmm5
# qhasm: r639 = r6>>>39
# asm 1: rorx $39,<r6=int64#10,>r639=int64#12
# asm 2: rorx $39,<r6=%r12,>r639=%r14
rorx $39,%r12,%r14
# qhasm: 1x,0 W8sigma1 ^= W8right61
# asm 1: vpxor <W8right61=reg256#8%128,<W8sigma1=reg256#6%128,<W8sigma1=reg256#6%128
# asm 2: vpxor <W8right61=%xmm7,<W8sigma1=%xmm5,<W8sigma1=%xmm5
vpxor %xmm7,%xmm5,%xmm5
# qhasm: 2x,0 W8left3 = X8 << 3
# asm 1: vpsllq $3,<X8=reg256#4%128,>W8left3=reg256#7%128
# asm 2: vpsllq $3,<X8=%xmm3,>W8left3=%xmm6
vpsllq $3,%xmm3,%xmm6
# qhasm: r6Sigma0 ^= r639
# asm 1: xor <r639=int64#12,<r6Sigma0=int64#13
# asm 2: xor <r639=%r14,<r6Sigma0=%r15
xor %r14,%r15
# qhasm: r5 += r2Sigma1
# asm 1: add <r2Sigma1=int64#7,<r5=int64#8
# asm 2: add <r2Sigma1=%rax,<r5=%r10
add %rax,%r10
# qhasm: r1 += r5
# asm 1: add <r5=int64#8,<r1=int64#4
# asm 2: add <r5=%r10,<r1=%rcx
add %r10,%rcx
# qhasm: 2x,0 W8right6 = X8 unsigned>> 6
# asm 1: vpsrlq $6,<X8=reg256#4%128,>W8right6=reg256#8%128
# asm 2: vpsrlq $6,<X8=%xmm3,>W8right6=%xmm7
vpsrlq $6,%xmm3,%xmm7
# qhasm: 1x,0 W8sigma1 ^= W8left3
# asm 1: vpxor <W8left3=reg256#7%128,<W8sigma1=reg256#6%128,<W8sigma1=reg256#6%128
# asm 2: vpxor <W8left3=%xmm6,<W8sigma1=%xmm5,<W8sigma1=%xmm5
vpxor %xmm6,%xmm5,%xmm5
# qhasm: r6andr7 = r7
# asm 1: mov <r7=int64#11,>r6andr7=int64#7
# asm 2: mov <r7=%r13,>r6andr7=%rax
mov %r13,%rax
# qhasm: r6andr7 &= r6
# asm 1: and <r6=int64#10,<r6andr7=int64#7
# asm 2: and <r6=%r12,<r6andr7=%rax
and %r12,%rax
# qhasm: r1Sigma1 = r1>>>14
# asm 1: rorx $14,<r1=int64#4,>r1Sigma1=int64#12
# asm 2: rorx $14,<r1=%rcx,>r1Sigma1=%r14
rorx $14,%rcx,%r14
# qhasm: 1x,0 W8sigma1 ^= W8right6
# asm 1: vpxor <W8right6=reg256#8%128,<W8sigma1=reg256#6%128,<W8sigma1=reg256#6%128
# asm 2: vpxor <W8right6=%xmm7,<W8sigma1=%xmm5,<W8sigma1=%xmm5
vpxor %xmm7,%xmm5,%xmm5
# qhasm: r5 += r6Sigma0
# asm 1: add <r6Sigma0=int64#13,<r5=int64#8
# asm 2: add <r6Sigma0=%r15,<r5=%r10
add %r15,%r10
# qhasm: maj5 = r0
# asm 1: mov <r0=int64#2,>maj5=int64#13
# asm 2: mov <r0=%rsi,>maj5=%r15
mov %rsi,%r15
# qhasm: maj5 &= maj4
# asm 1: and <maj4=int64#14,<maj5=int64#13
# asm 2: and <maj4=%rbx,<maj5=%r15
and %rbx,%r15
# qhasm: W8sigma1 = W8sigma1[2,3,0,1]
# asm 1: vpermq $0x4e,<W8sigma1=reg256#6,>W8sigma1=reg256#6
# asm 2: vpermq $0x4e,<W8sigma1=%ymm5,>W8sigma1=%ymm5
vpermq $0x4e,%ymm5,%ymm5
# qhasm: maj5 ^= r6andr7
# asm 1: xor <r6andr7=int64#7,<maj5=int64#13
# asm 2: xor <r6andr7=%rax,<maj5=%r15
xor %rax,%r15
# qhasm: ch4 = r3
# asm 1: mov <r3=int64#6,>ch4=int64#15
# asm 2: mov <r3=%r9,>ch4=%rbp
mov %r9,%rbp
# qhasm: ch4 ^= r2
# asm 1: xor <r2=int64#3,<ch4=int64#15
# asm 2: xor <r2=%rdx,<ch4=%rbp
xor %rdx,%rbp
# qhasm: X13 = mem256[&w + 104]
# asm 1: vmovupd <w=stack1280#1,>X13=reg256#7
# asm 2: vmovupd <w=264(%rsp),>X13=%ymm6
vmovupd 264(%rsp),%ymm6
# qhasm: 4x X13right1 = X13 unsigned>> 1
# asm 1: vpsrlq $1,<X13=reg256#7,>X13right1=reg256#8
# asm 2: vpsrlq $1,<X13=%ymm6,>X13right1=%ymm7
vpsrlq $1,%ymm6,%ymm7
# qhasm: r5 += maj5
# asm 1: add <maj5=int64#13,<r5=int64#8
# asm 2: add <maj5=%r15,<r5=%r10
add %r15,%r10
# qhasm: r118 = r1>>>18
# asm 1: rorx $18,<r1=int64#4,>r118=int64#13
# asm 2: rorx $18,<r1=%rcx,>r118=%r15
rorx $18,%rcx,%r15
# qhasm: ch4 &= r1
# asm 1: and <r1=int64#4,<ch4=int64#15
# asm 2: and <r1=%rcx,<ch4=%rbp
and %rcx,%rbp
# qhasm: maj4 &= r5
# asm 1: and <r5=int64#8,<maj4=int64#14
# asm 2: and <r5=%r10,<maj4=%rbx
and %r10,%rbx
# qhasm: ch4 ^= r3
# asm 1: xor <r3=int64#6,<ch4=int64#15
# asm 2: xor <r3=%r9,<ch4=%rbp
xor %r9,%rbp
# qhasm: r4 += wc0123[3]
# asm 1: addq <wc0123=stack256#3,<r4=int64#9
# asm 2: addq <wc0123=88(%rsp),<r4=%r11
addq 88(%rsp),%r11
# qhasm: r1Sigma1 ^= r118
# asm 1: xor <r118=int64#13,<r1Sigma1=int64#12
# asm 2: xor <r118=%r15,<r1Sigma1=%r14
xor %r15,%r14
# qhasm: r141 = r1>>>41
# asm 1: rorx $41,<r1=int64#4,>r141=int64#13
# asm 2: rorx $41,<r1=%rcx,>r141=%r15
rorx $41,%rcx,%r15
# qhasm: 4x X8 = X8 + W8sigma1
# asm 1: vpaddq <X8=reg256#4,<W8sigma1=reg256#6,>X8=reg256#4
# asm 2: vpaddq <X8=%ymm3,<W8sigma1=%ymm5,>X8=%ymm3
vpaddq %ymm3,%ymm5,%ymm3
# qhasm: mem256[&w + 64] = X8
# asm 1: vmovupd <X8=reg256#4,<w=stack1280#1
# asm 2: vmovupd <X8=%ymm3,<w=224(%rsp)
vmovupd %ymm3,224(%rsp)
# qhasm: r4 += ch4
# asm 1: add <ch4=int64#15,<r4=int64#9
# asm 2: add <ch4=%rbp,<r4=%r11
add %rbp,%r11
# qhasm: maj4 ^= r6andr7
# asm 1: xor <r6andr7=int64#7,<maj4=int64#14
# asm 2: xor <r6andr7=%rax,<maj4=%rbx
xor %rax,%rbx
# qhasm: r5Sigma0 = r5>>>28
# asm 1: rorx $28,<r5=int64#8,>r5Sigma0=int64#7
# asm 2: rorx $28,<r5=%r10,>r5Sigma0=%rax
rorx $28,%r10,%rax
# qhasm: 4x D8 = X8 + mem256[constants + 64]
# asm 1: vpaddq 64(<constants=int64#5),<X8=reg256#4,>D8=reg256#6
# asm 2: vpaddq 64(<constants=%r8),<X8=%ymm3,>D8=%ymm5
vpaddq 64(%r8),%ymm3,%ymm5
# qhasm: wc891011 = D8
# asm 1: vmovapd <D8=reg256#6,>wc891011=stack256#3
# asm 2: vmovapd <D8=%ymm5,>wc891011=64(%rsp)
vmovapd %ymm5,64(%rsp)
# qhasm: r534 = r5>>>34
# asm 1: rorx $34,<r5=int64#8,>r534=int64#15
# asm 2: rorx $34,<r5=%r10,>r534=%rbp
rorx $34,%r10,%rbp
# qhasm: r1Sigma1 ^= r141
# asm 1: xor <r141=int64#13,<r1Sigma1=int64#12
# asm 2: xor <r141=%r15,<r1Sigma1=%r14
xor %r15,%r14
# qhasm: r4 += r1Sigma1
# asm 1: add <r1Sigma1=int64#12,<r4=int64#9
# asm 2: add <r1Sigma1=%r14,<r4=%r11
add %r14,%r11
# qhasm: r5Sigma0 ^= r534
# asm 1: xor <r534=int64#15,<r5Sigma0=int64#7
# asm 2: xor <r534=%rbp,<r5Sigma0=%rax
xor %rbp,%rax
# qhasm: r539 = r5>>>39
# asm 1: rorx $39,<r5=int64#8,>r539=int64#12
# asm 2: rorx $39,<r5=%r10,>r539=%r14
rorx $39,%r10,%r14
# qhasm: r3 += wc4567[0]
# asm 1: addq <wc4567=stack256#4,<r3=int64#6
# asm 2: addq <wc4567=96(%rsp),<r3=%r9
addq 96(%rsp),%r9
# qhasm: r0 += r4
# asm 1: add <r4=int64#9,<r0=int64#2
# asm 2: add <r4=%r11,<r0=%rsi
add %r11,%rsi
# qhasm: r4 += maj4
# asm 1: add <maj4=int64#14,<r4=int64#9
# asm 2: add <maj4=%rbx,<r4=%r11
add %rbx,%r11
# qhasm: W10 = mem128[&w + 80],0
# asm 1: vmovupd <w=stack1280#1,>W10=reg256#6%128
# asm 2: vmovupd <w=240(%rsp),>W10=%xmm5
vmovupd 240(%rsp),%xmm5
# qhasm: r5Sigma0 ^= r539
# asm 1: xor <r539=int64#12,<r5Sigma0=int64#7
# asm 2: xor <r539=%r14,<r5Sigma0=%rax
xor %r14,%rax
# qhasm: r0Sigma1 = r0>>>14
# asm 1: rorx $14,<r0=int64#2,>r0Sigma1=int64#12
# asm 2: rorx $14,<r0=%rsi,>r0Sigma1=%r14
rorx $14,%rsi,%r14
# qhasm: r4 += r5Sigma0
# asm 1: add <r5Sigma0=int64#7,<r4=int64#9
# asm 2: add <r5Sigma0=%rax,<r4=%r11
add %rax,%r11
# qhasm: ch3 = r2
# asm 1: mov <r2=int64#3,>ch3=int64#7
# asm 2: mov <r2=%rdx,>ch3=%rax
mov %rdx,%rax
# qhasm: ch3 ^= r1
# asm 1: xor <r1=int64#4,<ch3=int64#7
# asm 2: xor <r1=%rcx,<ch3=%rax
xor %rcx,%rax
# qhasm: ch3 &= r0
# asm 1: and <r0=int64#2,<ch3=int64#7
# asm 2: and <r0=%rsi,<ch3=%rax
and %rsi,%rax
# qhasm: 4x X13left63 = X13 << 63
# asm 1: vpsllq $63,<X13=reg256#7,>X13left63=reg256#9
# asm 2: vpsllq $63,<X13=%ymm6,>X13left63=%ymm8
vpsllq $63,%ymm6,%ymm8
# qhasm: r018 = r0>>>18
# asm 1: rorx $18,<r0=int64#2,>r018=int64#13
# asm 2: rorx $18,<r0=%rsi,>r018=%r15
rorx $18,%rsi,%r15
# qhasm: maj2 = r5
# asm 1: mov <r5=int64#8,>maj2=int64#14
# asm 2: mov <r5=%r10,>maj2=%rbx
mov %r10,%rbx
# qhasm: maj2 ^= r4
# asm 1: xor <r4=int64#9,<maj2=int64#14
# asm 2: xor <r4=%r11,<maj2=%rbx
xor %r11,%rbx
# qhasm: X13sigma0 = X13right1 ^ X13left63
# asm 1: vpxor <X13right1=reg256#8,<X13left63=reg256#9,>X13sigma0=reg256#8
# asm 2: vpxor <X13right1=%ymm7,<X13left63=%ymm8,>X13sigma0=%ymm7
vpxor %ymm7,%ymm8,%ymm7
# qhasm: r041 = r0>>>41
# asm 1: rorx $41,<r0=int64#2,>r041=int64#15
# asm 2: rorx $41,<r0=%rsi,>r041=%rbp
rorx $41,%rsi,%rbp
# qhasm: r0Sigma1 ^= r018
# asm 1: xor <r018=int64#13,<r0Sigma1=int64#12
# asm 2: xor <r018=%r15,<r0Sigma1=%r14
xor %r15,%r14
# qhasm: ch3 ^= r2
# asm 1: xor <r2=int64#3,<ch3=int64#7
# asm 2: xor <r2=%rdx,<ch3=%rax
xor %rdx,%rax
# qhasm: 4x X13right8 = X13 unsigned>> 8
# asm 1: vpsrlq $8,<X13=reg256#7,>X13right8=reg256#9
# asm 2: vpsrlq $8,<X13=%ymm6,>X13right8=%ymm8
vpsrlq $8,%ymm6,%ymm8
# qhasm: r4Sigma0 = r4>>>28
# asm 1: rorx $28,<r4=int64#9,>r4Sigma0=int64#13
# asm 2: rorx $28,<r4=%r11,>r4Sigma0=%r15
rorx $28,%r11,%r15
# qhasm: r0Sigma1 ^= r041
# asm 1: xor <r041=int64#15,<r0Sigma1=int64#12
# asm 2: xor <r041=%rbp,<r0Sigma1=%r14
xor %rbp,%r14
# qhasm: r3 += ch3
# asm 1: add <ch3=int64#7,<r3=int64#6
# asm 2: add <ch3=%rax,<r3=%r9
add %rax,%r9
# qhasm: r434 = r4>>>34
# asm 1: rorx $34,<r4=int64#9,>r434=int64#7
# asm 2: rorx $34,<r4=%r11,>r434=%rax
rorx $34,%r11,%rax
# qhasm: X13sigma0 = X13sigma0 ^ X13right8
# asm 1: vpxor <X13sigma0=reg256#8,<X13right8=reg256#9,>X13sigma0=reg256#8
# asm 2: vpxor <X13sigma0=%ymm7,<X13right8=%ymm8,>X13sigma0=%ymm7
vpxor %ymm7,%ymm8,%ymm7
# qhasm: r3 += r0Sigma1
# asm 1: add <r0Sigma1=int64#12,<r3=int64#6
# asm 2: add <r0Sigma1=%r14,<r3=%r9
add %r14,%r9
# qhasm: maj3 = r6
# asm 1: mov <r6=int64#10,>maj3=int64#12
# asm 2: mov <r6=%r12,>maj3=%r14
mov %r12,%r14
# qhasm: maj3 &= maj2
# asm 1: and <maj2=int64#14,<maj3=int64#12
# asm 2: and <maj2=%rbx,<maj3=%r14
and %rbx,%r14
# qhasm: 2x,0 W10right19 = W10 unsigned>> 19
# asm 1: vpsrlq $19,<W10=reg256#6%128,>W10right19=reg256#9%128
# asm 2: vpsrlq $19,<W10=%xmm5,>W10right19=%xmm8
vpsrlq $19,%xmm5,%xmm8
# qhasm: r4Sigma0 ^= r434
# asm 1: xor <r434=int64#7,<r4Sigma0=int64#13
# asm 2: xor <r434=%rax,<r4Sigma0=%r15
xor %rax,%r15
# qhasm: r7 += r3
# asm 1: add <r3=int64#6,<r7=int64#11
# asm 2: add <r3=%r9,<r7=%r13
add %r9,%r13
# qhasm: r439 = r4>>>39
# asm 1: rorx $39,<r4=int64#9,>r439=int64#7
# asm 2: rorx $39,<r4=%r11,>r439=%rax
rorx $39,%r11,%rax
# qhasm: 4x X13left56 = X13 << 56
# asm 1: vpsllq $56,<X13=reg256#7,>X13left56=reg256#10
# asm 2: vpsllq $56,<X13=%ymm6,>X13left56=%ymm9
vpsllq $56,%ymm6,%ymm9
# qhasm: r4Sigma0 ^= r439
# asm 1: xor <r439=int64#7,<r4Sigma0=int64#13
# asm 2: xor <r439=%rax,<r4Sigma0=%r15
xor %rax,%r15
# qhasm: r2 += wc4567[1]
# asm 1: addq <wc4567=stack256#4,<r2=int64#3
# asm 2: addq <wc4567=104(%rsp),<r2=%rdx
addq 104(%rsp),%rdx
# qhasm: r4andr5 = r5
# asm 1: mov <r5=int64#8,>r4andr5=int64#7
# asm 2: mov <r5=%r10,>r4andr5=%rax
mov %r10,%rax
# qhasm: 2x,0 W10left45 = W10 << 45
# asm 1: vpsllq $45,<W10=reg256#6%128,>W10left45=reg256#11%128
# asm 2: vpsllq $45,<W10=%xmm5,>W10left45=%xmm10
vpsllq $45,%xmm5,%xmm10
# qhasm: r4andr5 &= r4
# asm 1: and <r4=int64#9,<r4andr5=int64#7
# asm 2: and <r4=%r11,<r4andr5=%rax
and %r11,%rax
# qhasm: r3 += r4Sigma0
# asm 1: add <r4Sigma0=int64#13,<r3=int64#6
# asm 2: add <r4Sigma0=%r15,<r3=%r9
add %r15,%r9
# qhasm: maj3 ^= r4andr5
# asm 1: xor <r4andr5=int64#7,<maj3=int64#12
# asm 2: xor <r4andr5=%rax,<maj3=%r14
xor %rax,%r14
# qhasm: ch2 = r1
# asm 1: mov <r1=int64#4,>ch2=int64#13
# asm 2: mov <r1=%rcx,>ch2=%r15
mov %rcx,%r15
# qhasm: 2x,0 W10right61 = W10 unsigned>> 61
# asm 1: vpsrlq $61,<W10=reg256#6%128,>W10right61=reg256#12%128
# asm 2: vpsrlq $61,<W10=%xmm5,>W10right61=%xmm11
vpsrlq $61,%xmm5,%xmm11
# qhasm: ch2 ^= r0
# asm 1: xor <r0=int64#2,<ch2=int64#13
# asm 2: xor <r0=%rsi,<ch2=%r15
xor %rsi,%r15
# qhasm: X13sigma0 = X13sigma0 ^ X13left56
# asm 1: vpxor <X13sigma0=reg256#8,<X13left56=reg256#10,>X13sigma0=reg256#8
# asm 2: vpxor <X13sigma0=%ymm7,<X13left56=%ymm9,>X13sigma0=%ymm7
vpxor %ymm7,%ymm9,%ymm7
# qhasm: r7Sigma1 = r7>>>14
# asm 1: rorx $14,<r7=int64#11,>r7Sigma1=int64#15
# asm 2: rorx $14,<r7=%r13,>r7Sigma1=%rbp
rorx $14,%r13,%rbp
# qhasm: 4x X13right7 = X13 unsigned>> 7
# asm 1: vpsrlq $7,<X13=reg256#7,>X13right7=reg256#7
# asm 2: vpsrlq $7,<X13=%ymm6,>X13right7=%ymm6
vpsrlq $7,%ymm6,%ymm6
# qhasm: r3 += maj3
# asm 1: add <maj3=int64#12,<r3=int64#6
# asm 2: add <maj3=%r14,<r3=%r9
add %r14,%r9
# qhasm: r718 = r7>>>18
# asm 1: rorx $18,<r7=int64#11,>r718=int64#12
# asm 2: rorx $18,<r7=%r13,>r718=%r14
rorx $18,%r13,%r14
# qhasm: 1x,0 W10sigma1 = W10right19 ^ W10left45
# asm 1: vpxor <W10right19=reg256#9%128,<W10left45=reg256#11%128,>W10sigma1=reg256#9%128
# asm 2: vpxor <W10right19=%xmm8,<W10left45=%xmm10,>W10sigma1=%xmm8
vpxor %xmm8,%xmm10,%xmm8
# qhasm: ch2 &= r7
# asm 1: and <r7=int64#11,<ch2=int64#13
# asm 2: and <r7=%r13,<ch2=%r15
and %r13,%r15
# qhasm: r7Sigma1 ^= r718
# asm 1: xor <r718=int64#12,<r7Sigma1=int64#15
# asm 2: xor <r718=%r14,<r7Sigma1=%rbp
xor %r14,%rbp
# qhasm: r741 = r7>>>41
# asm 1: rorx $41,<r7=int64#11,>r741=int64#12
# asm 2: rorx $41,<r7=%r13,>r741=%r14
rorx $41,%r13,%r14
# qhasm: X13sigma0 = X13sigma0 ^ X13right7
# asm 1: vpxor <X13sigma0=reg256#8,<X13right7=reg256#7,>X13sigma0=reg256#7
# asm 2: vpxor <X13sigma0=%ymm7,<X13right7=%ymm6,>X13sigma0=%ymm6
vpxor %ymm7,%ymm6,%ymm6
# qhasm: maj2 &= r3
# asm 1: and <r3=int64#6,<maj2=int64#14
# asm 2: and <r3=%r9,<maj2=%rbx
and %r9,%rbx
# qhasm: 1x,0 W10sigma1 ^= W10right61
# asm 1: vpxor <W10right61=reg256#12%128,<W10sigma1=reg256#9%128,<W10sigma1=reg256#9%128
# asm 2: vpxor <W10right61=%xmm11,<W10sigma1=%xmm8,<W10sigma1=%xmm8
vpxor %xmm11,%xmm8,%xmm8
# qhasm: 4x X12 = X12 + X13sigma0
# asm 1: vpaddq <X12=reg256#5,<X13sigma0=reg256#7,>X12=reg256#5
# asm 2: vpaddq <X12=%ymm4,<X13sigma0=%ymm6,>X12=%ymm4
vpaddq %ymm4,%ymm6,%ymm4
# qhasm: r7Sigma1 ^= r741
# asm 1: xor <r741=int64#12,<r7Sigma1=int64#15
# asm 2: xor <r741=%r14,<r7Sigma1=%rbp
xor %r14,%rbp
# qhasm: maj2 ^= r4andr5
# asm 1: xor <r4andr5=int64#7,<maj2=int64#14
# asm 2: xor <r4andr5=%rax,<maj2=%rbx
xor %rax,%rbx
# qhasm: 2x,0 W10left3 = W10 << 3
# asm 1: vpsllq $3,<W10=reg256#6%128,>W10left3=reg256#7%128
# asm 2: vpsllq $3,<W10=%xmm5,>W10left3=%xmm6
vpsllq $3,%xmm5,%xmm6
# qhasm: r3Sigma0 = r3>>>28
# asm 1: rorx $28,<r3=int64#6,>r3Sigma0=int64#7
# asm 2: rorx $28,<r3=%r9,>r3Sigma0=%rax
rorx $28,%r9,%rax
# qhasm: ch2 ^= r1
# asm 1: xor <r1=int64#4,<ch2=int64#13
# asm 2: xor <r1=%rcx,<ch2=%r15
xor %rcx,%r15
# qhasm: r2 += r7Sigma1
# asm 1: add <r7Sigma1=int64#15,<r2=int64#3
# asm 2: add <r7Sigma1=%rbp,<r2=%rdx
add %rbp,%rdx
# qhasm: r334 = r3>>>34
# asm 1: rorx $34,<r3=int64#6,>r334=int64#12
# asm 2: rorx $34,<r3=%r9,>r334=%r14
rorx $34,%r9,%r14
# qhasm: 4x X12 = X12 + mem256[&w + 40]
# asm 1: vpaddq <w=stack1280#1,<X12=reg256#5,>X12=reg256#5
# asm 2: vpaddq <w=200(%rsp),<X12=%ymm4,>X12=%ymm4
vpaddq 200(%rsp),%ymm4,%ymm4
# qhasm: r1 += wc4567[2]
# asm 1: addq <wc4567=stack256#4,<r1=int64#4
# asm 2: addq <wc4567=112(%rsp),<r1=%rcx
addq 112(%rsp),%rcx
# qhasm: r2 += ch2
# asm 1: add <ch2=int64#13,<r2=int64#3
# asm 2: add <ch2=%r15,<r2=%rdx
add %r15,%rdx
# qhasm: 1x,0 W10sigma1 ^= W10left3
# asm 1: vpxor <W10left3=reg256#7%128,<W10sigma1=reg256#9%128,<W10sigma1=reg256#9%128
# asm 2: vpxor <W10left3=%xmm6,<W10sigma1=%xmm8,<W10sigma1=%xmm8
vpxor %xmm6,%xmm8,%xmm8
# qhasm: r3Sigma0 ^= r334
# asm 1: xor <r334=int64#12,<r3Sigma0=int64#7
# asm 2: xor <r334=%r14,<r3Sigma0=%rax
xor %r14,%rax
# qhasm: r339 = r3>>>39
# asm 1: rorx $39,<r3=int64#6,>r339=int64#12
# asm 2: rorx $39,<r3=%r9,>r339=%r14
rorx $39,%r9,%r14
# qhasm: 2x,0 W10right6 = W10 unsigned>> 6
# asm 1: vpsrlq $6,<W10=reg256#6%128,>W10right6=reg256#6%128
# asm 2: vpsrlq $6,<W10=%xmm5,>W10right6=%xmm5
vpsrlq $6,%xmm5,%xmm5
# qhasm: r6 += r2
# asm 1: add <r2=int64#3,<r6=int64#10
# asm 2: add <r2=%rdx,<r6=%r12
add %rdx,%r12
# qhasm: r3Sigma0 ^= r339
# asm 1: xor <r339=int64#12,<r3Sigma0=int64#7
# asm 2: xor <r339=%r14,<r3Sigma0=%rax
xor %r14,%rax
# qhasm: ch1 = r0
# asm 1: mov <r0=int64#2,>ch1=int64#12
# asm 2: mov <r0=%rsi,>ch1=%r14
mov %rsi,%r14
# qhasm: r2 += maj2
# asm 1: add <maj2=int64#14,<r2=int64#3
# asm 2: add <maj2=%rbx,<r2=%rdx
add %rbx,%rdx
# qhasm: ch1 ^= r7
# asm 1: xor <r7=int64#11,<ch1=int64#12
# asm 2: xor <r7=%r13,<ch1=%r14
xor %r13,%r14
# qhasm: r2 += r3Sigma0
# asm 1: add <r3Sigma0=int64#7,<r2=int64#3
# asm 2: add <r3Sigma0=%rax,<r2=%rdx
add %rax,%rdx
# qhasm: r6Sigma1 = r6>>>14
# asm 1: rorx $14,<r6=int64#10,>r6Sigma1=int64#7
# asm 2: rorx $14,<r6=%r12,>r6Sigma1=%rax
rorx $14,%r12,%rax
# qhasm: 1x,0 W10sigma1 ^= W10right6
# asm 1: vpxor <W10right6=reg256#6%128,<W10sigma1=reg256#9%128,<W10sigma1=reg256#9%128
# asm 2: vpxor <W10right6=%xmm5,<W10sigma1=%xmm8,<W10sigma1=%xmm8
vpxor %xmm5,%xmm8,%xmm8
# qhasm: ch1 &= r6
# asm 1: and <r6=int64#10,<ch1=int64#12
# asm 2: and <r6=%r12,<ch1=%r14
and %r12,%r14
# qhasm: r618 = r6>>>18
# asm 1: rorx $18,<r6=int64#10,>r618=int64#13
# asm 2: rorx $18,<r6=%r12,>r618=%r15
rorx $18,%r12,%r15
# qhasm: ch1 ^= r0
# asm 1: xor <r0=int64#2,<ch1=int64#12
# asm 2: xor <r0=%rsi,<ch1=%r14
xor %rsi,%r14
# qhasm: 4x X12 = W10sigma1 + X12
# asm 1: vpaddq <W10sigma1=reg256#9,<X12=reg256#5,>X12=reg256#5
# asm 2: vpaddq <W10sigma1=%ymm8,<X12=%ymm4,>X12=%ymm4
vpaddq %ymm8,%ymm4,%ymm4
# qhasm: r641 = r6>>>41
# asm 1: rorx $41,<r6=int64#10,>r641=int64#14
# asm 2: rorx $41,<r6=%r12,>r641=%rbx
rorx $41,%r12,%rbx
# qhasm: 2x,0 W12right19 = X12 unsigned>> 19
# asm 1: vpsrlq $19,<X12=reg256#5%128,>W12right19=reg256#6%128
# asm 2: vpsrlq $19,<X12=%xmm4,>W12right19=%xmm5
vpsrlq $19,%xmm4,%xmm5
# qhasm: r6Sigma1 ^= r618
# asm 1: xor <r618=int64#13,<r6Sigma1=int64#7
# asm 2: xor <r618=%r15,<r6Sigma1=%rax
xor %r15,%rax
# qhasm: r1 += ch1
# asm 1: add <ch1=int64#12,<r1=int64#4
# asm 2: add <ch1=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: r2Sigma0 = r2>>>28
# asm 1: rorx $28,<r2=int64#3,>r2Sigma0=int64#12
# asm 2: rorx $28,<r2=%rdx,>r2Sigma0=%r14
rorx $28,%rdx,%r14
# qhasm: 2x,0 W12left45 = X12 << 45
# asm 1: vpsllq $45,<X12=reg256#5%128,>W12left45=reg256#7%128
# asm 2: vpsllq $45,<X12=%xmm4,>W12left45=%xmm6
vpsllq $45,%xmm4,%xmm6
# qhasm: r6Sigma1 ^= r641
# asm 1: xor <r641=int64#14,<r6Sigma1=int64#7
# asm 2: xor <r641=%rbx,<r6Sigma1=%rax
xor %rbx,%rax
# qhasm: r234 = r2>>>34
# asm 1: rorx $34,<r2=int64#3,>r234=int64#13
# asm 2: rorx $34,<r2=%rdx,>r234=%r15
rorx $34,%rdx,%r15
# qhasm: maj0 = r3
# asm 1: mov <r3=int64#6,>maj0=int64#14
# asm 2: mov <r3=%r9,>maj0=%rbx
mov %r9,%rbx
# qhasm: maj0 ^= r2
# asm 1: xor <r2=int64#3,<maj0=int64#14
# asm 2: xor <r2=%rdx,<maj0=%rbx
xor %rdx,%rbx
# qhasm: r2Sigma0 ^= r234
# asm 1: xor <r234=int64#13,<r2Sigma0=int64#12
# asm 2: xor <r234=%r15,<r2Sigma0=%r14
xor %r15,%r14
# qhasm: 2x,0 W12right61 = X12 unsigned>> 61
# asm 1: vpsrlq $61,<X12=reg256#5%128,>W12right61=reg256#8%128
# asm 2: vpsrlq $61,<X12=%xmm4,>W12right61=%xmm7
vpsrlq $61,%xmm4,%xmm7
# qhasm: r239 = r2>>>39
# asm 1: rorx $39,<r2=int64#3,>r239=int64#13
# asm 2: rorx $39,<r2=%rdx,>r239=%r15
rorx $39,%rdx,%r15
# qhasm: 1x,0 W12sigma1 = W12right19 ^ W12left45
# asm 1: vpxor <W12right19=reg256#6%128,<W12left45=reg256#7%128,>W12sigma1=reg256#6%128
# asm 2: vpxor <W12right19=%xmm5,<W12left45=%xmm6,>W12sigma1=%xmm5
vpxor %xmm5,%xmm6,%xmm5
# qhasm: 2x,0 W12left3 = X12 << 3
# asm 1: vpsllq $3,<X12=reg256#5%128,>W12left3=reg256#7%128
# asm 2: vpsllq $3,<X12=%xmm4,>W12left3=%xmm6
vpsllq $3,%xmm4,%xmm6
# qhasm: 1x,0 W12sigma1 ^= W12right61
# asm 1: vpxor <W12right61=reg256#8%128,<W12sigma1=reg256#6%128,<W12sigma1=reg256#6%128
# asm 2: vpxor <W12right61=%xmm7,<W12sigma1=%xmm5,<W12sigma1=%xmm5
vpxor %xmm7,%xmm5,%xmm5
# qhasm: r2Sigma0 ^= r239
# asm 1: xor <r239=int64#13,<r2Sigma0=int64#12
# asm 2: xor <r239=%r15,<r2Sigma0=%r14
xor %r15,%r14
# qhasm: r1 += r6Sigma1
# asm 1: add <r6Sigma1=int64#7,<r1=int64#4
# asm 2: add <r6Sigma1=%rax,<r1=%rcx
add %rax,%rcx
# qhasm: 2x,0 W12right6 = X12 unsigned>> 6
# asm 1: vpsrlq $6,<X12=reg256#5%128,>W12right6=reg256#8%128
# asm 2: vpsrlq $6,<X12=%xmm4,>W12right6=%xmm7
vpsrlq $6,%xmm4,%xmm7
# qhasm: r5 += r1
# asm 1: add <r1=int64#4,<r5=int64#8
# asm 2: add <r1=%rcx,<r5=%r10
add %rcx,%r10
# qhasm: 1x,0 W12sigma1 ^= W12left3
# asm 1: vpxor <W12left3=reg256#7%128,<W12sigma1=reg256#6%128,<W12sigma1=reg256#6%128
# asm 2: vpxor <W12left3=%xmm6,<W12sigma1=%xmm5,<W12sigma1=%xmm5
vpxor %xmm6,%xmm5,%xmm5
# qhasm: r2andr3 = r3
# asm 1: mov <r3=int64#6,>r2andr3=int64#7
# asm 2: mov <r3=%r9,>r2andr3=%rax
mov %r9,%rax
# qhasm: r2andr3 &= r2
# asm 1: and <r2=int64#3,<r2andr3=int64#7
# asm 2: and <r2=%rdx,<r2andr3=%rax
and %rdx,%rax
# qhasm: r5Sigma1 = r5>>>14
# asm 1: rorx $14,<r5=int64#8,>r5Sigma1=int64#13
# asm 2: rorx $14,<r5=%r10,>r5Sigma1=%r15
rorx $14,%r10,%r15
# qhasm: r1 += r2Sigma0
# asm 1: add <r2Sigma0=int64#12,<r1=int64#4
# asm 2: add <r2Sigma0=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: 1x,0 W12sigma1 ^= W12right6
# asm 1: vpxor <W12right6=reg256#8%128,<W12sigma1=reg256#6%128,<W12sigma1=reg256#6%128
# asm 2: vpxor <W12right6=%xmm7,<W12sigma1=%xmm5,<W12sigma1=%xmm5
vpxor %xmm7,%xmm5,%xmm5
# qhasm: maj1 = r4
# asm 1: mov <r4=int64#9,>maj1=int64#12
# asm 2: mov <r4=%r11,>maj1=%r14
mov %r11,%r14
# qhasm: maj1 &= maj0
# asm 1: and <maj0=int64#14,<maj1=int64#12
# asm 2: and <maj0=%rbx,<maj1=%r14
and %rbx,%r14
# qhasm: W12sigma1 = W12sigma1[2,3,0,1]
# asm 1: vpermq $0x4e,<W12sigma1=reg256#6,>W12sigma1=reg256#6
# asm 2: vpermq $0x4e,<W12sigma1=%ymm5,>W12sigma1=%ymm5
vpermq $0x4e,%ymm5,%ymm5
# qhasm: maj1 ^= r2andr3
# asm 1: xor <r2andr3=int64#7,<maj1=int64#12
# asm 2: xor <r2andr3=%rax,<maj1=%r14
xor %rax,%r14
# qhasm: ch0 = r7
# asm 1: mov <r7=int64#11,>ch0=int64#15
# asm 2: mov <r7=%r13,>ch0=%rbp
mov %r13,%rbp
# qhasm: ch0 ^= r6
# asm 1: xor <r6=int64#10,<ch0=int64#15
# asm 2: xor <r6=%r12,<ch0=%rbp
xor %r12,%rbp
# qhasm: r1 += maj1
# asm 1: add <maj1=int64#12,<r1=int64#4
# asm 2: add <maj1=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: ch0 &= r5
# asm 1: and <r5=int64#8,<ch0=int64#15
# asm 2: and <r5=%r10,<ch0=%rbp
and %r10,%rbp
# qhasm: r518 = r5>>>18
# asm 1: rorx $18,<r5=int64#8,>r518=int64#12
# asm 2: rorx $18,<r5=%r10,>r518=%r14
rorx $18,%r10,%r14
# qhasm: maj0 &= r1
# asm 1: and <r1=int64#4,<maj0=int64#14
# asm 2: and <r1=%rcx,<maj0=%rbx
and %rcx,%rbx
# qhasm: r0 += wc4567[3]
# asm 1: addq <wc4567=stack256#4,<r0=int64#2
# asm 2: addq <wc4567=120(%rsp),<r0=%rsi
addq 120(%rsp),%rsi
# qhasm: ch0 ^= r7
# asm 1: xor <r7=int64#11,<ch0=int64#15
# asm 2: xor <r7=%r13,<ch0=%rbp
xor %r13,%rbp
# qhasm: r5Sigma1 ^= r518
# asm 1: xor <r518=int64#12,<r5Sigma1=int64#13
# asm 2: xor <r518=%r14,<r5Sigma1=%r15
xor %r14,%r15
# qhasm: r541 = r5>>>41
# asm 1: rorx $41,<r5=int64#8,>r541=int64#12
# asm 2: rorx $41,<r5=%r10,>r541=%r14
rorx $41,%r10,%r14
# qhasm: 4x X12 = X12 + W12sigma1
# asm 1: vpaddq <X12=reg256#5,<W12sigma1=reg256#6,>X12=reg256#5
# asm 2: vpaddq <X12=%ymm4,<W12sigma1=%ymm5,>X12=%ymm4
vpaddq %ymm4,%ymm5,%ymm4
# qhasm: r0 += ch0
# asm 1: add <ch0=int64#15,<r0=int64#2
# asm 2: add <ch0=%rbp,<r0=%rsi
add %rbp,%rsi
# qhasm: mem256[&w + 96] = X12
# asm 1: vmovupd <X12=reg256#5,<w=stack1280#1
# asm 2: vmovupd <X12=%ymm4,<w=256(%rsp)
vmovupd %ymm4,256(%rsp)
# qhasm: maj0 ^= r2andr3
# asm 1: xor <r2andr3=int64#7,<maj0=int64#14
# asm 2: xor <r2andr3=%rax,<maj0=%rbx
xor %rax,%rbx
# qhasm: r1Sigma0 = r1>>>28
# asm 1: rorx $28,<r1=int64#4,>r1Sigma0=int64#7
# asm 2: rorx $28,<r1=%rcx,>r1Sigma0=%rax
rorx $28,%rcx,%rax
# qhasm: 4x D12 = X12 + mem256[constants + 96]
# asm 1: vpaddq 96(<constants=int64#5),<X12=reg256#5,>D12=reg256#6
# asm 2: vpaddq 96(<constants=%r8),<X12=%ymm4,>D12=%ymm5
vpaddq 96(%r8),%ymm4,%ymm5
# qhasm: wc12131415 = D12
# asm 1: vmovapd <D12=reg256#6,>wc12131415=stack256#5
# asm 2: vmovapd <D12=%ymm5,>wc12131415=128(%rsp)
vmovapd %ymm5,128(%rsp)
# qhasm: r5Sigma1 ^= r541
# asm 1: xor <r541=int64#12,<r5Sigma1=int64#13
# asm 2: xor <r541=%r14,<r5Sigma1=%r15
xor %r14,%r15
# qhasm: r134 = r1>>>34
# asm 1: rorx $34,<r1=int64#4,>r134=int64#12
# asm 2: rorx $34,<r1=%rcx,>r134=%r14
rorx $34,%rcx,%r14
# qhasm: r0 += r5Sigma1
# asm 1: add <r5Sigma1=int64#13,<r0=int64#2
# asm 2: add <r5Sigma1=%r15,<r0=%rsi
add %r15,%rsi
# qhasm: r1Sigma0 ^= r134
# asm 1: xor <r134=int64#12,<r1Sigma0=int64#7
# asm 2: xor <r134=%r14,<r1Sigma0=%rax
xor %r14,%rax
# qhasm: r139 = r1>>>39
# asm 1: rorx $39,<r1=int64#4,>r139=int64#12
# asm 2: rorx $39,<r1=%rcx,>r139=%r14
rorx $39,%rcx,%r14
# qhasm: r4 += r0
# asm 1: add <r0=int64#2,<r4=int64#9
# asm 2: add <r0=%rsi,<r4=%r11
add %rsi,%r11
# qhasm: r0 += maj0
# asm 1: add <maj0=int64#14,<r0=int64#2
# asm 2: add <maj0=%rbx,<r0=%rsi
add %rbx,%rsi
# qhasm: r1Sigma0 ^= r139
# asm 1: xor <r139=int64#12,<r1Sigma0=int64#7
# asm 2: xor <r139=%r14,<r1Sigma0=%rax
xor %r14,%rax
# qhasm: r0 += r1Sigma0
# asm 1: add <r1Sigma0=int64#7,<r0=int64#2
# asm 2: add <r1Sigma0=%rax,<r0=%rsi
add %rax,%rsi
# qhasm: =? i -= 1
# asm 1: dec <i=int64#1
# asm 2: dec <i=%rdi
dec %rdi
# comment:fp stack unchanged by jump
# qhasm: goto innerloop if !=
jne ._innerloop
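# note: end of the inner loop (i is a countdown of remaining iterations); the rounds
# note: that follow consume the wc* words saved during the last pass without
# note: expanding the message schedule any further.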
# qhasm: r7 += wc891011[0]
# asm 1: addq <wc891011=stack256#3,<r7=int64#11
# asm 2: addq <wc891011=64(%rsp),<r7=%r13
addq 64(%rsp),%r13
# qhasm: r4Sigma1 = r4>>>14
# asm 1: rorx $14,<r4=int64#9,>r4Sigma1=int64#1
# asm 2: rorx $14,<r4=%r11,>r4Sigma1=%rdi
rorx $14,%r11,%rdi
# qhasm: ch7 = r6
# asm 1: mov <r6=int64#10,>ch7=int64#5
# asm 2: mov <r6=%r12,>ch7=%r8
mov %r12,%r8
# qhasm: r418 = r4>>>18
# asm 1: rorx $18,<r4=int64#9,>r418=int64#7
# asm 2: rorx $18,<r4=%r11,>r418=%rax
rorx $18,%r11,%rax
# qhasm: ch7 ^= r5
# asm 1: xor <r5=int64#8,<ch7=int64#5
# asm 2: xor <r5=%r10,<ch7=%r8
xor %r10,%r8
# qhasm: r441 = r4>>>41
# asm 1: rorx $41,<r4=int64#9,>r441=int64#12
# asm 2: rorx $41,<r4=%r11,>r441=%r14
rorx $41,%r11,%r14
# qhasm: r4Sigma1 ^= r418
# asm 1: xor <r418=int64#7,<r4Sigma1=int64#1
# asm 2: xor <r418=%rax,<r4Sigma1=%rdi
xor %rax,%rdi
# qhasm: ch7 &= r4
# asm 1: and <r4=int64#9,<ch7=int64#5
# asm 2: and <r4=%r11,<ch7=%r8
and %r11,%r8
# qhasm: r0Sigma0 = r0>>>28
# asm 1: rorx $28,<r0=int64#2,>r0Sigma0=int64#7
# asm 2: rorx $28,<r0=%rsi,>r0Sigma0=%rax
rorx $28,%rsi,%rax
# qhasm: r4Sigma1 ^= r441
# asm 1: xor <r441=int64#12,<r4Sigma1=int64#1
# asm 2: xor <r441=%r14,<r4Sigma1=%rdi
xor %r14,%rdi
# qhasm: ch7 ^= r6
# asm 1: xor <r6=int64#10,<ch7=int64#5
# asm 2: xor <r6=%r12,<ch7=%r8
xor %r12,%r8
# qhasm: r034 = r0>>>34
# asm 1: rorx $34,<r0=int64#2,>r034=int64#12
# asm 2: rorx $34,<r0=%rsi,>r034=%r14
rorx $34,%rsi,%r14
# qhasm: maj6 = r1
# asm 1: mov <r1=int64#4,>maj6=int64#13
# asm 2: mov <r1=%rcx,>maj6=%r15
mov %rcx,%r15
# qhasm: maj6 ^= r0
# asm 1: xor <r0=int64#2,<maj6=int64#13
# asm 2: xor <r0=%rsi,<maj6=%r15
xor %rsi,%r15
# qhasm: r0Sigma0 ^= r034
# asm 1: xor <r034=int64#12,<r0Sigma0=int64#7
# asm 2: xor <r034=%r14,<r0Sigma0=%rax
xor %r14,%rax
# qhasm: r7 += ch7
# asm 1: add <ch7=int64#5,<r7=int64#11
# asm 2: add <ch7=%r8,<r7=%r13
add %r8,%r13
# qhasm: r0andr1 = r1
# asm 1: mov <r1=int64#4,>r0andr1=int64#5
# asm 2: mov <r1=%rcx,>r0andr1=%r8
mov %rcx,%r8
# qhasm: r039 = r0>>>39
# asm 1: rorx $39,<r0=int64#2,>r039=int64#12
# asm 2: rorx $39,<r0=%rsi,>r039=%r14
rorx $39,%rsi,%r14
# qhasm: r0andr1 &= r0
# asm 1: and <r0=int64#2,<r0andr1=int64#5
# asm 2: and <r0=%rsi,<r0andr1=%r8
and %rsi,%r8
# qhasm: r0Sigma0 ^= r039
# asm 1: xor <r039=int64#12,<r0Sigma0=int64#7
# asm 2: xor <r039=%r14,<r0Sigma0=%rax
xor %r14,%rax
# qhasm: r7 += r4Sigma1
# asm 1: add <r4Sigma1=int64#1,<r7=int64#11
# asm 2: add <r4Sigma1=%rdi,<r7=%r13
add %rdi,%r13
# qhasm: maj7 = r2
# asm 1: mov <r2=int64#3,>maj7=int64#1
# asm 2: mov <r2=%rdx,>maj7=%rdi
mov %rdx,%rdi
# qhasm: r6 += wc891011[1]
# asm 1: addq <wc891011=stack256#3,<r6=int64#10
# asm 2: addq <wc891011=72(%rsp),<r6=%r12
addq 72(%rsp),%r12
# qhasm: maj7 &= maj6
# asm 1: and <maj6=int64#13,<maj7=int64#1
# asm 2: and <maj6=%r15,<maj7=%rdi
and %r15,%rdi
# qhasm: r3 += r7
# asm 1: add <r7=int64#11,<r3=int64#6
# asm 2: add <r7=%r13,<r3=%r9
add %r13,%r9
# qhasm: r7 += r0Sigma0
# asm 1: add <r0Sigma0=int64#7,<r7=int64#11
# asm 2: add <r0Sigma0=%rax,<r7=%r13
add %rax,%r13
# qhasm: ch6 = r5
# asm 1: mov <r5=int64#8,>ch6=int64#7
# asm 2: mov <r5=%r10,>ch6=%rax
mov %r10,%rax
# qhasm: maj7 ^= r0andr1
# asm 1: xor <r0andr1=int64#5,<maj7=int64#1
# asm 2: xor <r0andr1=%r8,<maj7=%rdi
xor %r8,%rdi
# qhasm: ch6 ^= r4
# asm 1: xor <r4=int64#9,<ch6=int64#7
# asm 2: xor <r4=%r11,<ch6=%rax
xor %r11,%rax
# qhasm: r3Sigma1 = r3>>>14
# asm 1: rorx $14,<r3=int64#6,>r3Sigma1=int64#12
# asm 2: rorx $14,<r3=%r9,>r3Sigma1=%r14
rorx $14,%r9,%r14
# qhasm: r7 += maj7
# asm 1: add <maj7=int64#1,<r7=int64#11
# asm 2: add <maj7=%rdi,<r7=%r13
add %rdi,%r13
# qhasm: ch6 &= r3
# asm 1: and <r3=int64#6,<ch6=int64#7
# asm 2: and <r3=%r9,<ch6=%rax
and %r9,%rax
# qhasm: r318 = r3>>>18
# asm 1: rorx $18,<r3=int64#6,>r318=int64#1
# asm 2: rorx $18,<r3=%r9,>r318=%rdi
rorx $18,%r9,%rdi
# qhasm: r3Sigma1 ^= r318
# asm 1: xor <r318=int64#1,<r3Sigma1=int64#12
# asm 2: xor <r318=%rdi,<r3Sigma1=%r14
xor %rdi,%r14
# qhasm: maj6 &= r7
# asm 1: and <r7=int64#11,<maj6=int64#13
# asm 2: and <r7=%r13,<maj6=%r15
and %r13,%r15
# qhasm: ch6 ^= r5
# asm 1: xor <r5=int64#8,<ch6=int64#7
# asm 2: xor <r5=%r10,<ch6=%rax
xor %r10,%rax
# qhasm: r341 = r3>>>41
# asm 1: rorx $41,<r3=int64#6,>r341=int64#1
# asm 2: rorx $41,<r3=%r9,>r341=%rdi
rorx $41,%r9,%rdi
# qhasm: r3Sigma1 ^= r341
# asm 1: xor <r341=int64#1,<r3Sigma1=int64#12
# asm 2: xor <r341=%rdi,<r3Sigma1=%r14
xor %rdi,%r14
# qhasm: r7Sigma0 = r7>>>28
# asm 1: rorx $28,<r7=int64#11,>r7Sigma0=int64#1
# asm 2: rorx $28,<r7=%r13,>r7Sigma0=%rdi
rorx $28,%r13,%rdi
# qhasm: maj6 ^= r0andr1
# asm 1: xor <r0andr1=int64#5,<maj6=int64#13
# asm 2: xor <r0andr1=%r8,<maj6=%r15
xor %r8,%r15
# qhasm: r6 += ch6
# asm 1: add <ch6=int64#7,<r6=int64#10
# asm 2: add <ch6=%rax,<r6=%r12
add %rax,%r12
# qhasm: r6 += r3Sigma1
# asm 1: add <r3Sigma1=int64#12,<r6=int64#10
# asm 2: add <r3Sigma1=%r14,<r6=%r12
add %r14,%r12
# qhasm: r734 = r7>>>34
# asm 1: rorx $34,<r7=int64#11,>r734=int64#5
# asm 2: rorx $34,<r7=%r13,>r734=%r8
rorx $34,%r13,%r8
# qhasm: r7Sigma0 ^= r734
# asm 1: xor <r734=int64#5,<r7Sigma0=int64#1
# asm 2: xor <r734=%r8,<r7Sigma0=%rdi
xor %r8,%rdi
# qhasm: r2 += r6
# asm 1: add <r6=int64#10,<r2=int64#3
# asm 2: add <r6=%r12,<r2=%rdx
add %r12,%rdx
# qhasm: r6 += maj6
# asm 1: add <maj6=int64#13,<r6=int64#10
# asm 2: add <maj6=%r15,<r6=%r12
add %r15,%r12
# qhasm: r739 = r7>>>39
# asm 1: rorx $39,<r7=int64#11,>r739=int64#5
# asm 2: rorx $39,<r7=%r13,>r739=%r8
rorx $39,%r13,%r8
# qhasm: r7Sigma0 ^= r739
# asm 1: xor <r739=int64#5,<r7Sigma0=int64#1
# asm 2: xor <r739=%r8,<r7Sigma0=%rdi
xor %r8,%rdi
# qhasm: r6 += r7Sigma0
# asm 1: add <r7Sigma0=int64#1,<r6=int64#10
# asm 2: add <r7Sigma0=%rdi,<r6=%r12
add %rdi,%r12
# qhasm: in = in_stack
# asm 1: movq <in_stack=stack64#11,>in=int64#1
# asm 2: movq <in_stack=400(%rsp),>in=%rdi
movq 400(%rsp),%rdi
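# Bookkeeping for the next 128-byte block (reloading in and inlen, advancing the
# pointer, decrementing the length) is interleaved with the remaining rounds,
# presumably so it overlaps with the round arithmetic.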
# qhasm: r5 += wc891011[2]
# asm 1: addq <wc891011=stack256#3,<r5=int64#8
# asm 2: addq <wc891011=80(%rsp),<r5=%r10
addq 80(%rsp),%r10
# qhasm: r2Sigma1 = r2>>>14
# asm 1: rorx $14,<r2=int64#3,>r2Sigma1=int64#5
# asm 2: rorx $14,<r2=%rdx,>r2Sigma1=%r8
rorx $14,%rdx,%r8
# qhasm: ch5 = r4
# asm 1: mov <r4=int64#9,>ch5=int64#7
# asm 2: mov <r4=%r11,>ch5=%rax
mov %r11,%rax
# qhasm: r218 = r2>>>18
# asm 1: rorx $18,<r2=int64#3,>r218=int64#12
# asm 2: rorx $18,<r2=%rdx,>r218=%r14
rorx $18,%rdx,%r14
# qhasm: ch5 ^= r3
# asm 1: xor <r3=int64#6,<ch5=int64#7
# asm 2: xor <r3=%r9,<ch5=%rax
xor %r9,%rax
# qhasm: r241 = r2>>>41
# asm 1: rorx $41,<r2=int64#3,>r241=int64#13
# asm 2: rorx $41,<r2=%rdx,>r241=%r15
rorx $41,%rdx,%r15
# qhasm: r2Sigma1 ^= r218
# asm 1: xor <r218=int64#12,<r2Sigma1=int64#5
# asm 2: xor <r218=%r14,<r2Sigma1=%r8
xor %r14,%r8
# qhasm: ch5 &= r2
# asm 1: and <r2=int64#3,<ch5=int64#7
# asm 2: and <r2=%rdx,<ch5=%rax
and %rdx,%rax
# qhasm: r6Sigma0 = r6>>>28
# asm 1: rorx $28,<r6=int64#10,>r6Sigma0=int64#12
# asm 2: rorx $28,<r6=%r12,>r6Sigma0=%r14
rorx $28,%r12,%r14
# qhasm: r2Sigma1 ^= r241
# asm 1: xor <r241=int64#13,<r2Sigma1=int64#5
# asm 2: xor <r241=%r15,<r2Sigma1=%r8
xor %r15,%r8
# qhasm: ch5 ^= r4
# asm 1: xor <r4=int64#9,<ch5=int64#7
# asm 2: xor <r4=%r11,<ch5=%rax
xor %r11,%rax
# qhasm: r634 = r6>>>34
# asm 1: rorx $34,<r6=int64#10,>r634=int64#13
# asm 2: rorx $34,<r6=%r12,>r634=%r15
rorx $34,%r12,%r15
# qhasm: maj4 = r7
# asm 1: mov <r7=int64#11,>maj4=int64#14
# asm 2: mov <r7=%r13,>maj4=%rbx
mov %r13,%rbx
# qhasm: maj4 ^= r6
# asm 1: xor <r6=int64#10,<maj4=int64#14
# asm 2: xor <r6=%r12,<maj4=%rbx
xor %r12,%rbx
# qhasm: r6Sigma0 ^= r634
# asm 1: xor <r634=int64#13,<r6Sigma0=int64#12
# asm 2: xor <r634=%r15,<r6Sigma0=%r14
xor %r15,%r14
# qhasm: r5 += ch5
# asm 1: add <ch5=int64#7,<r5=int64#8
# asm 2: add <ch5=%rax,<r5=%r10
add %rax,%r10
# qhasm: r6andr7 = r7
# asm 1: mov <r7=int64#11,>r6andr7=int64#7
# asm 2: mov <r7=%r13,>r6andr7=%rax
mov %r13,%rax
# qhasm: r639 = r6>>>39
# asm 1: rorx $39,<r6=int64#10,>r639=int64#13
# asm 2: rorx $39,<r6=%r12,>r639=%r15
rorx $39,%r12,%r15
# qhasm: r6andr7 &= r6
# asm 1: and <r6=int64#10,<r6andr7=int64#7
# asm 2: and <r6=%r12,<r6andr7=%rax
and %r12,%rax
# qhasm: r6Sigma0 ^= r639
# asm 1: xor <r639=int64#13,<r6Sigma0=int64#12
# asm 2: xor <r639=%r15,<r6Sigma0=%r14
xor %r15,%r14
# qhasm: r5 += r2Sigma1
# asm 1: add <r2Sigma1=int64#5,<r5=int64#8
# asm 2: add <r2Sigma1=%r8,<r5=%r10
add %r8,%r10
# qhasm: maj5 = r0
# asm 1: mov <r0=int64#2,>maj5=int64#5
# asm 2: mov <r0=%rsi,>maj5=%r8
mov %rsi,%r8
# qhasm: r4 += wc891011[3]
# asm 1: addq <wc891011=stack256#3,<r4=int64#9
# asm 2: addq <wc891011=88(%rsp),<r4=%r11
addq 88(%rsp),%r11
# qhasm: maj5 &= maj4
# asm 1: and <maj4=int64#14,<maj5=int64#5
# asm 2: and <maj4=%rbx,<maj5=%r8
and %rbx,%r8
# qhasm: r1 += r5
# asm 1: add <r5=int64#8,<r1=int64#4
# asm 2: add <r5=%r10,<r1=%rcx
add %r10,%rcx
# qhasm: r5 += r6Sigma0
# asm 1: add <r6Sigma0=int64#12,<r5=int64#8
# asm 2: add <r6Sigma0=%r14,<r5=%r10
add %r14,%r10
# qhasm: ch4 = r3
# asm 1: mov <r3=int64#6,>ch4=int64#12
# asm 2: mov <r3=%r9,>ch4=%r14
mov %r9,%r14
# qhasm: maj5 ^= r6andr7
# asm 1: xor <r6andr7=int64#7,<maj5=int64#5
# asm 2: xor <r6andr7=%rax,<maj5=%r8
xor %rax,%r8
# qhasm: ch4 ^= r2
# asm 1: xor <r2=int64#3,<ch4=int64#12
# asm 2: xor <r2=%rdx,<ch4=%r14
xor %rdx,%r14
# qhasm: r1Sigma1 = r1>>>14
# asm 1: rorx $14,<r1=int64#4,>r1Sigma1=int64#13
# asm 2: rorx $14,<r1=%rcx,>r1Sigma1=%r15
rorx $14,%rcx,%r15
# qhasm: r5 += maj5
# asm 1: add <maj5=int64#5,<r5=int64#8
# asm 2: add <maj5=%r8,<r5=%r10
add %r8,%r10
# qhasm: ch4 &= r1
# asm 1: and <r1=int64#4,<ch4=int64#12
# asm 2: and <r1=%rcx,<ch4=%r14
and %rcx,%r14
# qhasm: r118 = r1>>>18
# asm 1: rorx $18,<r1=int64#4,>r118=int64#5
# asm 2: rorx $18,<r1=%rcx,>r118=%r8
rorx $18,%rcx,%r8
# qhasm: r1Sigma1 ^= r118
# asm 1: xor <r118=int64#5,<r1Sigma1=int64#13
# asm 2: xor <r118=%r8,<r1Sigma1=%r15
xor %r8,%r15
# qhasm: maj4 &= r5
# asm 1: and <r5=int64#8,<maj4=int64#14
# asm 2: and <r5=%r10,<maj4=%rbx
and %r10,%rbx
# qhasm: ch4 ^= r3
# asm 1: xor <r3=int64#6,<ch4=int64#12
# asm 2: xor <r3=%r9,<ch4=%r14
xor %r9,%r14
# qhasm: r141 = r1>>>41
# asm 1: rorx $41,<r1=int64#4,>r141=int64#5
# asm 2: rorx $41,<r1=%rcx,>r141=%r8
rorx $41,%rcx,%r8
# qhasm: r1Sigma1 ^= r141
# asm 1: xor <r141=int64#5,<r1Sigma1=int64#13
# asm 2: xor <r141=%r8,<r1Sigma1=%r15
xor %r8,%r15
# qhasm: r5Sigma0 = r5>>>28
# asm 1: rorx $28,<r5=int64#8,>r5Sigma0=int64#5
# asm 2: rorx $28,<r5=%r10,>r5Sigma0=%r8
rorx $28,%r10,%r8
# qhasm: maj4 ^= r6andr7
# asm 1: xor <r6andr7=int64#7,<maj4=int64#14
# asm 2: xor <r6andr7=%rax,<maj4=%rbx
xor %rax,%rbx
# qhasm: r4 += ch4
# asm 1: add <ch4=int64#12,<r4=int64#9
# asm 2: add <ch4=%r14,<r4=%r11
add %r14,%r11
# qhasm: r4 += r1Sigma1
# asm 1: add <r1Sigma1=int64#13,<r4=int64#9
# asm 2: add <r1Sigma1=%r15,<r4=%r11
add %r15,%r11
# qhasm: r534 = r5>>>34
# asm 1: rorx $34,<r5=int64#8,>r534=int64#7
# asm 2: rorx $34,<r5=%r10,>r534=%rax
rorx $34,%r10,%rax
# qhasm: r5Sigma0 ^= r534
# asm 1: xor <r534=int64#7,<r5Sigma0=int64#5
# asm 2: xor <r534=%rax,<r5Sigma0=%r8
xor %rax,%r8
# qhasm: r0 += r4
# asm 1: add <r4=int64#9,<r0=int64#2
# asm 2: add <r4=%r11,<r0=%rsi
add %r11,%rsi
# qhasm: r4 += maj4
# asm 1: add <maj4=int64#14,<r4=int64#9
# asm 2: add <maj4=%rbx,<r4=%r11
add %rbx,%r11
# qhasm: r539 = r5>>>39
# asm 1: rorx $39,<r5=int64#8,>r539=int64#7
# asm 2: rorx $39,<r5=%r10,>r539=%rax
rorx $39,%r10,%rax
# qhasm: r5Sigma0 ^= r539
# asm 1: xor <r539=int64#7,<r5Sigma0=int64#5
# asm 2: xor <r539=%rax,<r5Sigma0=%r8
xor %rax,%r8
# qhasm: r4 += r5Sigma0
# asm 1: add <r5Sigma0=int64#5,<r4=int64#9
# asm 2: add <r5Sigma0=%r8,<r4=%r11
add %r8,%r11
# qhasm: inlen = inlen_stack
# asm 1: movq <inlen_stack=stack64#8,>inlen=int64#7
# asm 2: movq <inlen_stack=376(%rsp),>inlen=%rax
movq 376(%rsp),%rax
# qhasm: in += 128
# asm 1: add $128,<in=int64#1
# asm 2: add $128,<in=%rdi
add $128,%rdi
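# Final four rounds; their w[i]+K[i] values come from the wc12131415 buffer.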
# qhasm: r3 += wc12131415[0]
# asm 1: addq <wc12131415=stack256#5,<r3=int64#6
# asm 2: addq <wc12131415=128(%rsp),<r3=%r9
addq 128(%rsp),%r9
# qhasm: r0Sigma1 = r0>>>14
# asm 1: rorx $14,<r0=int64#2,>r0Sigma1=int64#5
# asm 2: rorx $14,<r0=%rsi,>r0Sigma1=%r8
rorx $14,%rsi,%r8
# qhasm: ch3 = r2
# asm 1: mov <r2=int64#3,>ch3=int64#12
# asm 2: mov <r2=%rdx,>ch3=%r14
mov %rdx,%r14
# qhasm: r018 = r0>>>18
# asm 1: rorx $18,<r0=int64#2,>r018=int64#13
# asm 2: rorx $18,<r0=%rsi,>r018=%r15
rorx $18,%rsi,%r15
# qhasm: ch3 ^= r1
# asm 1: xor <r1=int64#4,<ch3=int64#12
# asm 2: xor <r1=%rcx,<ch3=%r14
xor %rcx,%r14
# qhasm: r041 = r0>>>41
# asm 1: rorx $41,<r0=int64#2,>r041=int64#14
# asm 2: rorx $41,<r0=%rsi,>r041=%rbx
rorx $41,%rsi,%rbx
# qhasm: r0Sigma1 ^= r018
# asm 1: xor <r018=int64#13,<r0Sigma1=int64#5
# asm 2: xor <r018=%r15,<r0Sigma1=%r8
xor %r15,%r8
# qhasm: ch3 &= r0
# asm 1: and <r0=int64#2,<ch3=int64#12
# asm 2: and <r0=%rsi,<ch3=%r14
and %rsi,%r14
# qhasm: r4Sigma0 = r4>>>28
# asm 1: rorx $28,<r4=int64#9,>r4Sigma0=int64#13
# asm 2: rorx $28,<r4=%r11,>r4Sigma0=%r15
rorx $28,%r11,%r15
# qhasm: r0Sigma1 ^= r041
# asm 1: xor <r041=int64#14,<r0Sigma1=int64#5
# asm 2: xor <r041=%rbx,<r0Sigma1=%r8
xor %rbx,%r8
# qhasm: ch3 ^= r2
# asm 1: xor <r2=int64#3,<ch3=int64#12
# asm 2: xor <r2=%rdx,<ch3=%r14
xor %rdx,%r14
# qhasm: r434 = r4>>>34
# asm 1: rorx $34,<r4=int64#9,>r434=int64#14
# asm 2: rorx $34,<r4=%r11,>r434=%rbx
rorx $34,%r11,%rbx
# qhasm: maj2 = r5
# asm 1: mov <r5=int64#8,>maj2=int64#15
# asm 2: mov <r5=%r10,>maj2=%rbp
mov %r10,%rbp
# qhasm: maj2 ^= r4
# asm 1: xor <r4=int64#9,<maj2=int64#15
# asm 2: xor <r4=%r11,<maj2=%rbp
xor %r11,%rbp
# qhasm: r4Sigma0 ^= r434
# asm 1: xor <r434=int64#14,<r4Sigma0=int64#13
# asm 2: xor <r434=%rbx,<r4Sigma0=%r15
xor %rbx,%r15
# qhasm: r3 += ch3
# asm 1: add <ch3=int64#12,<r3=int64#6
# asm 2: add <ch3=%r14,<r3=%r9
add %r14,%r9
# qhasm: r4andr5 = r5
# asm 1: mov <r5=int64#8,>r4andr5=int64#12
# asm 2: mov <r5=%r10,>r4andr5=%r14
mov %r10,%r14
# qhasm: r439 = r4>>>39
# asm 1: rorx $39,<r4=int64#9,>r439=int64#14
# asm 2: rorx $39,<r4=%r11,>r439=%rbx
rorx $39,%r11,%rbx
# qhasm: r4andr5 &= r4
# asm 1: and <r4=int64#9,<r4andr5=int64#12
# asm 2: and <r4=%r11,<r4andr5=%r14
and %r11,%r14
# qhasm: r4Sigma0 ^= r439
# asm 1: xor <r439=int64#14,<r4Sigma0=int64#13
# asm 2: xor <r439=%rbx,<r4Sigma0=%r15
xor %rbx,%r15
# qhasm: r3 += r0Sigma1
# asm 1: add <r0Sigma1=int64#5,<r3=int64#6
# asm 2: add <r0Sigma1=%r8,<r3=%r9
add %r8,%r9
# qhasm: maj3 = r6
# asm 1: mov <r6=int64#10,>maj3=int64#5
# asm 2: mov <r6=%r12,>maj3=%r8
mov %r12,%r8
# qhasm: r2 += wc12131415[1]
# asm 1: addq <wc12131415=stack256#5,<r2=int64#3
# asm 2: addq <wc12131415=136(%rsp),<r2=%rdx
addq 136(%rsp),%rdx
# qhasm: maj3 &= maj2
# asm 1: and <maj2=int64#15,<maj3=int64#5
# asm 2: and <maj2=%rbp,<maj3=%r8
and %rbp,%r8
# qhasm: r7 += r3
# asm 1: add <r3=int64#6,<r7=int64#11
# asm 2: add <r3=%r9,<r7=%r13
add %r9,%r13
# qhasm: r3 += r4Sigma0
# asm 1: add <r4Sigma0=int64#13,<r3=int64#6
# asm 2: add <r4Sigma0=%r15,<r3=%r9
add %r15,%r9
# qhasm: ch2 = r1
# asm 1: mov <r1=int64#4,>ch2=int64#13
# asm 2: mov <r1=%rcx,>ch2=%r15
mov %rcx,%r15
# qhasm: maj3 ^= r4andr5
# asm 1: xor <r4andr5=int64#12,<maj3=int64#5
# asm 2: xor <r4andr5=%r14,<maj3=%r8
xor %r14,%r8
# qhasm: ch2 ^= r0
# asm 1: xor <r0=int64#2,<ch2=int64#13
# asm 2: xor <r0=%rsi,<ch2=%r15
xor %rsi,%r15
# qhasm: r7Sigma1 = r7>>>14
# asm 1: rorx $14,<r7=int64#11,>r7Sigma1=int64#14
# asm 2: rorx $14,<r7=%r13,>r7Sigma1=%rbx
rorx $14,%r13,%rbx
# qhasm: r3 += maj3
# asm 1: add <maj3=int64#5,<r3=int64#6
# asm 2: add <maj3=%r8,<r3=%r9
add %r8,%r9
# qhasm: ch2 &= r7
# asm 1: and <r7=int64#11,<ch2=int64#13
# asm 2: and <r7=%r13,<ch2=%r15
and %r13,%r15
# qhasm: r718 = r7>>>18
# asm 1: rorx $18,<r7=int64#11,>r718=int64#5
# asm 2: rorx $18,<r7=%r13,>r718=%r8
rorx $18,%r13,%r8
# qhasm: r7Sigma1 ^= r718
# asm 1: xor <r718=int64#5,<r7Sigma1=int64#14
# asm 2: xor <r718=%r8,<r7Sigma1=%rbx
xor %r8,%rbx
# qhasm: maj2 &= r3
# asm 1: and <r3=int64#6,<maj2=int64#15
# asm 2: and <r3=%r9,<maj2=%rbp
and %r9,%rbp
# qhasm: ch2 ^= r1
# asm 1: xor <r1=int64#4,<ch2=int64#13
# asm 2: xor <r1=%rcx,<ch2=%r15
xor %rcx,%r15
# qhasm: r741 = r7>>>41
# asm 1: rorx $41,<r7=int64#11,>r741=int64#5
# asm 2: rorx $41,<r7=%r13,>r741=%r8
rorx $41,%r13,%r8
# qhasm: r7Sigma1 ^= r741
# asm 1: xor <r741=int64#5,<r7Sigma1=int64#14
# asm 2: xor <r741=%r8,<r7Sigma1=%rbx
xor %r8,%rbx
# qhasm: r3Sigma0 = r3>>>28
# asm 1: rorx $28,<r3=int64#6,>r3Sigma0=int64#5
# asm 2: rorx $28,<r3=%r9,>r3Sigma0=%r8
rorx $28,%r9,%r8
# qhasm: maj2 ^= r4andr5
# asm 1: xor <r4andr5=int64#12,<maj2=int64#15
# asm 2: xor <r4andr5=%r14,<maj2=%rbp
xor %r14,%rbp
# qhasm: r2 += ch2
# asm 1: add <ch2=int64#13,<r2=int64#3
# asm 2: add <ch2=%r15,<r2=%rdx
add %r15,%rdx
# qhasm: r2 += r7Sigma1
# asm 1: add <r7Sigma1=int64#14,<r2=int64#3
# asm 2: add <r7Sigma1=%rbx,<r2=%rdx
add %rbx,%rdx
# qhasm: r334 = r3>>>34
# asm 1: rorx $34,<r3=int64#6,>r334=int64#12
# asm 2: rorx $34,<r3=%r9,>r334=%r14
rorx $34,%r9,%r14
# qhasm: r3Sigma0 ^= r334
# asm 1: xor <r334=int64#12,<r3Sigma0=int64#5
# asm 2: xor <r334=%r14,<r3Sigma0=%r8
xor %r14,%r8
# qhasm: r6 += r2
# asm 1: add <r2=int64#3,<r6=int64#10
# asm 2: add <r2=%rdx,<r6=%r12
add %rdx,%r12
# qhasm: r2 += maj2
# asm 1: add <maj2=int64#15,<r2=int64#3
# asm 2: add <maj2=%rbp,<r2=%rdx
add %rbp,%rdx
# qhasm: r339 = r3>>>39
# asm 1: rorx $39,<r3=int64#6,>r339=int64#12
# asm 2: rorx $39,<r3=%r9,>r339=%r14
rorx $39,%r9,%r14
# qhasm: r3Sigma0 ^= r339
# asm 1: xor <r339=int64#12,<r3Sigma0=int64#5
# asm 2: xor <r339=%r14,<r3Sigma0=%r8
xor %r14,%r8
# qhasm: r2 += r3Sigma0
# asm 1: add <r3Sigma0=int64#5,<r2=int64#3
# asm 2: add <r3Sigma0=%r8,<r2=%rdx
add %r8,%rdx
# qhasm: inlen -= 128
# asm 1: sub $128,<inlen=int64#7
# asm 2: sub $128,<inlen=%rax
sub $128,%rax
# qhasm: r1 += wc12131415[2]
# asm 1: addq <wc12131415=stack256#5,<r1=int64#4
# asm 2: addq <wc12131415=144(%rsp),<r1=%rcx
addq 144(%rsp),%rcx
# qhasm: r6Sigma1 = r6>>>14
# asm 1: rorx $14,<r6=int64#10,>r6Sigma1=int64#5
# asm 2: rorx $14,<r6=%r12,>r6Sigma1=%r8
rorx $14,%r12,%r8
# qhasm: ch1 = r0
# asm 1: mov <r0=int64#2,>ch1=int64#12
# asm 2: mov <r0=%rsi,>ch1=%r14
mov %rsi,%r14
# qhasm: r618 = r6>>>18
# asm 1: rorx $18,<r6=int64#10,>r618=int64#13
# asm 2: rorx $18,<r6=%r12,>r618=%r15
rorx $18,%r12,%r15
# qhasm: ch1 ^= r7
# asm 1: xor <r7=int64#11,<ch1=int64#12
# asm 2: xor <r7=%r13,<ch1=%r14
xor %r13,%r14
# qhasm: r641 = r6>>>41
# asm 1: rorx $41,<r6=int64#10,>r641=int64#14
# asm 2: rorx $41,<r6=%r12,>r641=%rbx
rorx $41,%r12,%rbx
# qhasm: r6Sigma1 ^= r618
# asm 1: xor <r618=int64#13,<r6Sigma1=int64#5
# asm 2: xor <r618=%r15,<r6Sigma1=%r8
xor %r15,%r8
# qhasm: ch1 &= r6
# asm 1: and <r6=int64#10,<ch1=int64#12
# asm 2: and <r6=%r12,<ch1=%r14
and %r12,%r14
# qhasm: r2Sigma0 = r2>>>28
# asm 1: rorx $28,<r2=int64#3,>r2Sigma0=int64#13
# asm 2: rorx $28,<r2=%rdx,>r2Sigma0=%r15
rorx $28,%rdx,%r15
# qhasm: r6Sigma1 ^= r641
# asm 1: xor <r641=int64#14,<r6Sigma1=int64#5
# asm 2: xor <r641=%rbx,<r6Sigma1=%r8
xor %rbx,%r8
# qhasm: ch1 ^= r0
# asm 1: xor <r0=int64#2,<ch1=int64#12
# asm 2: xor <r0=%rsi,<ch1=%r14
xor %rsi,%r14
# qhasm: r234 = r2>>>34
# asm 1: rorx $34,<r2=int64#3,>r234=int64#14
# asm 2: rorx $34,<r2=%rdx,>r234=%rbx
rorx $34,%rdx,%rbx
# qhasm: maj0 = r3
# asm 1: mov <r3=int64#6,>maj0=int64#15
# asm 2: mov <r3=%r9,>maj0=%rbp
mov %r9,%rbp
# qhasm: maj0 ^= r2
# asm 1: xor <r2=int64#3,<maj0=int64#15
# asm 2: xor <r2=%rdx,<maj0=%rbp
xor %rdx,%rbp
# qhasm: r2Sigma0 ^= r234
# asm 1: xor <r234=int64#14,<r2Sigma0=int64#13
# asm 2: xor <r234=%rbx,<r2Sigma0=%r15
xor %rbx,%r15
# qhasm: r1 += ch1
# asm 1: add <ch1=int64#12,<r1=int64#4
# asm 2: add <ch1=%r14,<r1=%rcx
add %r14,%rcx
# qhasm: r2andr3 = r3
# asm 1: mov <r3=int64#6,>r2andr3=int64#12
# asm 2: mov <r3=%r9,>r2andr3=%r14
mov %r9,%r14
# qhasm: r239 = r2>>>39
# asm 1: rorx $39,<r2=int64#3,>r239=int64#14
# asm 2: rorx $39,<r2=%rdx,>r239=%rbx
rorx $39,%rdx,%rbx
# qhasm: r2andr3 &= r2
# asm 1: and <r2=int64#3,<r2andr3=int64#12
# asm 2: and <r2=%rdx,<r2andr3=%r14
and %rdx,%r14
# qhasm: r2Sigma0 ^= r239
# asm 1: xor <r239=int64#14,<r2Sigma0=int64#13
# asm 2: xor <r239=%rbx,<r2Sigma0=%r15
xor %rbx,%r15
# qhasm: r1 += r6Sigma1
# asm 1: add <r6Sigma1=int64#5,<r1=int64#4
# asm 2: add <r6Sigma1=%r8,<r1=%rcx
add %r8,%rcx
# qhasm: maj1 = r4
# asm 1: mov <r4=int64#9,>maj1=int64#5
# asm 2: mov <r4=%r11,>maj1=%r8
mov %r11,%r8
# qhasm: r0 += wc12131415[3]
# asm 1: addq <wc12131415=stack256#5,<r0=int64#2
# asm 2: addq <wc12131415=152(%rsp),<r0=%rsi
addq 152(%rsp),%rsi
# qhasm: maj1 &= maj0
# asm 1: and <maj0=int64#15,<maj1=int64#5
# asm 2: and <maj0=%rbp,<maj1=%r8
and %rbp,%r8
# qhasm: r5 += r1
# asm 1: add <r1=int64#4,<r5=int64#8
# asm 2: add <r1=%rcx,<r5=%r10
add %rcx,%r10
# qhasm: r1 += r2Sigma0
# asm 1: add <r2Sigma0=int64#13,<r1=int64#4
# asm 2: add <r2Sigma0=%r15,<r1=%rcx
add %r15,%rcx
# qhasm: ch0 = r7
# asm 1: mov <r7=int64#11,>ch0=int64#13
# asm 2: mov <r7=%r13,>ch0=%r15
mov %r13,%r15
# qhasm: maj1 ^= r2andr3
# asm 1: xor <r2andr3=int64#12,<maj1=int64#5
# asm 2: xor <r2andr3=%r14,<maj1=%r8
xor %r14,%r8
# qhasm: ch0 ^= r6
# asm 1: xor <r6=int64#10,<ch0=int64#13
# asm 2: xor <r6=%r12,<ch0=%r15
xor %r12,%r15
# qhasm: r5Sigma1 = r5>>>14
# asm 1: rorx $14,<r5=int64#8,>r5Sigma1=int64#14
# asm 2: rorx $14,<r5=%r10,>r5Sigma1=%rbx
rorx $14,%r10,%rbx
# qhasm: r1 += maj1
# asm 1: add <maj1=int64#5,<r1=int64#4
# asm 2: add <maj1=%r8,<r1=%rcx
add %r8,%rcx
# qhasm: ch0 &= r5
# asm 1: and <r5=int64#8,<ch0=int64#13
# asm 2: and <r5=%r10,<ch0=%r15
and %r10,%r15
# qhasm: r518 = r5>>>18
# asm 1: rorx $18,<r5=int64#8,>r518=int64#5
# asm 2: rorx $18,<r5=%r10,>r518=%r8
rorx $18,%r10,%r8
# qhasm: r5Sigma1 ^= r518
# asm 1: xor <r518=int64#5,<r5Sigma1=int64#14
# asm 2: xor <r518=%r8,<r5Sigma1=%rbx
xor %r8,%rbx
# qhasm: maj0 &= r1
# asm 1: and <r1=int64#4,<maj0=int64#15
# asm 2: and <r1=%rcx,<maj0=%rbp
and %rcx,%rbp
# qhasm: ch0 ^= r7
# asm 1: xor <r7=int64#11,<ch0=int64#13
# asm 2: xor <r7=%r13,<ch0=%r15
xor %r13,%r15
# qhasm: r541 = r5>>>41
# asm 1: rorx $41,<r5=int64#8,>r541=int64#5
# asm 2: rorx $41,<r5=%r10,>r541=%r8
rorx $41,%r10,%r8
# qhasm: r5Sigma1 ^= r541
# asm 1: xor <r541=int64#5,<r5Sigma1=int64#14
# asm 2: xor <r541=%r8,<r5Sigma1=%rbx
xor %r8,%rbx
# qhasm: r1Sigma0 = r1>>>28
# asm 1: rorx $28,<r1=int64#4,>r1Sigma0=int64#5
# asm 2: rorx $28,<r1=%rcx,>r1Sigma0=%r8
rorx $28,%rcx,%r8
# qhasm: maj0 ^= r2andr3
# asm 1: xor <r2andr3=int64#12,<maj0=int64#15
# asm 2: xor <r2andr3=%r14,<maj0=%rbp
xor %r14,%rbp
# qhasm: r0 += ch0
# asm 1: add <ch0=int64#13,<r0=int64#2
# asm 2: add <ch0=%r15,<r0=%rsi
add %r15,%rsi
# qhasm: r0 += r5Sigma1
# asm 1: add <r5Sigma1=int64#14,<r0=int64#2
# asm 2: add <r5Sigma1=%rbx,<r0=%rsi
add %rbx,%rsi
# qhasm: r134 = r1>>>34
# asm 1: rorx $34,<r1=int64#4,>r134=int64#12
# asm 2: rorx $34,<r1=%rcx,>r134=%r14
rorx $34,%rcx,%r14
# qhasm: r1Sigma0 ^= r134
# asm 1: xor <r134=int64#12,<r1Sigma0=int64#5
# asm 2: xor <r134=%r14,<r1Sigma0=%r8
xor %r14,%r8
# qhasm: r4 += r0
# asm 1: add <r0=int64#2,<r4=int64#9
# asm 2: add <r0=%rsi,<r4=%r11
add %rsi,%r11
# qhasm: r0 += maj0
# asm 1: add <maj0=int64#15,<r0=int64#2
# asm 2: add <maj0=%rbp,<r0=%rsi
add %rbp,%rsi
# qhasm: r139 = r1>>>39
# asm 1: rorx $39,<r1=int64#4,>r139=int64#12
# asm 2: rorx $39,<r1=%rcx,>r139=%r14
rorx $39,%rcx,%r14
# qhasm: r1Sigma0 ^= r139
# asm 1: xor <r139=int64#12,<r1Sigma0=int64#5
# asm 2: xor <r139=%r14,<r1Sigma0=%r8
xor %r14,%r8
# qhasm: r0 += r1Sigma0
# asm 1: add <r1Sigma0=int64#5,<r0=int64#2
# asm 2: add <r1Sigma0=%r8,<r0=%rsi
add %r8,%rsi
# qhasm: inlen_stack = inlen
# asm 1: movq <inlen=int64#7,>inlen_stack=stack64#8
# asm 2: movq <inlen=%rax,>inlen_stack=376(%rsp)
movq %rax,376(%rsp)
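# Feedforward: add the chaining value saved in state0123/state4567 back into the
# eight working variables, as SHA-512 requires at the end of each block.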
# qhasm: r7 += state4567[3]
# asm 1: addq <state4567=stack256#2,<r7=int64#11
# asm 2: addq <state4567=56(%rsp),<r7=%r13
addq 56(%rsp),%r13
# qhasm: r3 += state0123[3]
# asm 1: addq <state0123=stack256#1,<r3=int64#6
# asm 2: addq <state0123=24(%rsp),<r3=%r9
addq 24(%rsp),%r9
# qhasm: r6 += state4567[2]
# asm 1: addq <state4567=stack256#2,<r6=int64#10
# asm 2: addq <state4567=48(%rsp),<r6=%r12
addq 48(%rsp),%r12
# qhasm: r2 += state0123[2]
# asm 1: addq <state0123=stack256#1,<r2=int64#3
# asm 2: addq <state0123=16(%rsp),<r2=%rdx
addq 16(%rsp),%rdx
# qhasm: r5 += state4567[1]
# asm 1: addq <state4567=stack256#2,<r5=int64#8
# asm 2: addq <state4567=40(%rsp),<r5=%r10
addq 40(%rsp),%r10
# qhasm: r1 += state0123[1]
# asm 1: addq <state0123=stack256#1,<r1=int64#4
# asm 2: addq <state0123=8(%rsp),<r1=%rcx
addq 8(%rsp),%rcx
# qhasm: r4 += state4567[0]
# asm 1: addq <state4567=stack256#2,<r4=int64#9
# asm 2: addq <state4567=32(%rsp),<r4=%r11
addq 32(%rsp),%r11
# qhasm: constants = constants_stack
# asm 1: movq <constants_stack=stack64#10,>constants=int64#5
# asm 2: movq <constants_stack=392(%rsp),>constants=%r8
movq 392(%rsp),%r8
# qhasm: r0 += state0123[0]
# asm 1: addq <state0123=stack256#1,<r0=int64#2
# asm 2: addq <state0123=0(%rsp),<r0=%rsi
addq 0(%rsp),%rsi
# qhasm: unsigned<? inlen - 128
# asm 1: cmp $128,<inlen=int64#7
# asm 2: cmp $128,<inlen=%rax
cmp $128,%rax
# comment:fp stack unchanged by jump
# qhasm: goto outerloop if !unsigned<
jae ._outerloop
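# Fall-through: fewer than 128 bytes remain, so this was the last full block.
# Spill the updated state to the stack copies, then write it back to the
# caller's buffer in big-endian byte order below.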
# qhasm: inplace state4567[2] = r6
# asm 1: movq <r6=int64#10,<state4567=stack256#2
# asm 2: movq <r6=%r12,<state4567=48(%rsp)
movq %r12,48(%rsp)
# qhasm: inplace state4567[3] = r7
# asm 1: movq <r7=int64#11,<state4567=stack256#2
# asm 2: movq <r7=%r13,<state4567=56(%rsp)
movq %r13,56(%rsp)
# qhasm: inplace state0123[2] = r2
# asm 1: movq <r2=int64#3,<state0123=stack256#1
# asm 2: movq <r2=%rdx,<state0123=16(%rsp)
movq %rdx,16(%rsp)
# qhasm: inplace state4567[0] = r4
# asm 1: movq <r4=int64#9,<state4567=stack256#2
# asm 2: movq <r4=%r11,<state4567=32(%rsp)
movq %r11,32(%rsp)
# qhasm: inplace state0123[3] = r3
# asm 1: movq <r3=int64#6,<state0123=stack256#1
# asm 2: movq <r3=%r9,<state0123=24(%rsp)
movq %r9,24(%rsp)
# qhasm: inplace state4567[1] = r5
# asm 1: movq <r5=int64#8,<state4567=stack256#2
# asm 2: movq <r5=%r10,<state4567=40(%rsp)
movq %r10,40(%rsp)
# qhasm: inplace state0123[0] = r0
# asm 1: movq <r0=int64#2,<state0123=stack256#1
# asm 2: movq <r0=%rsi,<state0123=0(%rsp)
movq %rsi,0(%rsp)
# qhasm: inplace state0123[1] = r1
# asm 1: movq <r1=int64#4,<state0123=stack256#1
# asm 2: movq <r1=%rcx,<state0123=8(%rsp)
movq %rcx,8(%rsp)
# qhasm: statebytes = statebytes_stack
# asm 1: movq <statebytes_stack=stack64#9,>statebytes=int64#1
# asm 2: movq <statebytes_stack=384(%rsp),>statebytes=%rdi
movq 384(%rsp),%rdi
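# Reload the updated state as two 256-bit vectors and byte-swap each 64-bit lane
# with the bigendian64 shuffle mask (which appears to still be live in %ymm0)
# before storing the result to statebytes.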
# qhasm: X0 = state0123
# asm 1: vmovapd <state0123=stack256#1,>X0=reg256#2
# asm 2: vmovapd <state0123=0(%rsp),>X0=%ymm1
vmovapd 0(%rsp),%ymm1
# qhasm: X4 = state4567
# asm 1: vmovapd <state4567=stack256#2,>X4=reg256#3
# asm 2: vmovapd <state4567=32(%rsp),>X4=%ymm2
vmovapd 32(%rsp),%ymm2
# qhasm: 2x 16x X0 = X0[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X0=reg256#2,>X0=reg256#2
# asm 2: vpshufb <bigendian64=%ymm0,<X0=%ymm1,>X0=%ymm1
vpshufb %ymm0,%ymm1,%ymm1
# qhasm: 2x 16x X4 = X4[bigendian64]
# asm 1: vpshufb <bigendian64=reg256#1,<X4=reg256#3,>X4=reg256#1
# asm 2: vpshufb <bigendian64=%ymm0,<X4=%ymm2,>X4=%ymm0
vpshufb %ymm0,%ymm2,%ymm0
# qhasm: mem256[statebytes+0] = X0
# asm 1: vmovupd <X0=reg256#2,0(<statebytes=int64#1)
# asm 2: vmovupd <X0=%ymm1,0(<statebytes=%rdi)
vmovupd %ymm1,0(%rdi)
# qhasm: mem256[statebytes+32] = X4
# asm 1: vmovupd <X4=reg256#1,32(<statebytes=int64#1)
# asm 2: vmovupd <X4=%ymm0,32(<statebytes=%rdi)
vmovupd %ymm0,32(%rdi)
# qhasm: vzeroupper
vzeroupper
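# Epilogue: restore the spilled registers, add %r11 (reloaded from r11_stack,
# which appears to hold the prologue's stack adjustment) back to %rsp to unwind
# the frame, and return; the remaining byte count (inlen) is left in %rax as
# the return value.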
# qhasm: caller_r11 = r11_stack
# asm 1: movq <r11_stack=stack64#1,>caller_r11=int64#9
# asm 2: movq <r11_stack=320(%rsp),>caller_r11=%r11
movq 320(%rsp),%r11
# qhasm: caller_r12 = r12_stack
# asm 1: movq <r12_stack=stack64#3,>caller_r12=int64#10
# asm 2: movq <r12_stack=336(%rsp),>caller_r12=%r12
movq 336(%rsp),%r12
# qhasm: caller_r14 = r14_stack
# asm 1: movq <r14_stack=stack64#4,>caller_r14=int64#12
# asm 2: movq <r14_stack=344(%rsp),>caller_r14=%r14
movq 344(%rsp),%r14
# qhasm: caller_r13 = r13_stack
# asm 1: movq <r13_stack=stack64#2,>caller_r13=int64#11
# asm 2: movq <r13_stack=328(%rsp),>caller_r13=%r13
movq 328(%rsp),%r13
# qhasm: caller_r15 = r15_stack
# asm 1: movq <r15_stack=stack64#6,>caller_r15=int64#13
# asm 2: movq <r15_stack=360(%rsp),>caller_r15=%r15
movq 360(%rsp),%r15
# qhasm: caller_rbx = rbx_stack
# asm 1: movq <rbx_stack=stack64#5,>caller_rbx=int64#14
# asm 2: movq <rbx_stack=352(%rsp),>caller_rbx=%rbx
movq 352(%rsp),%rbx
# qhasm: caller_rbp = rbp_stack
# asm 1: movq <rbp_stack=stack64#7,>caller_rbp=int64#15
# asm 2: movq <rbp_stack=368(%rsp),>caller_rbp=%rbp
movq 368(%rsp),%rbp
# qhasm: return inlen
add %r11,%rsp
ret
.section .note.GNU-stack,"",@progbits